diff --git a/.github/workflows/pr-dependabot.yaml b/.github/workflows/pr-dependabot.yaml
index e4e782ed2a..5cdc830b5e 100644
--- a/.github/workflows/pr-dependabot.yaml
+++ b/.github/workflows/pr-dependabot.yaml
@@ -19,15 +19,15 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: Check out code into the Go module directory
-      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2
+      uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # tag=v6.0.0
     - name: Calculate go version
      id: vars
      run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT
     - name: Set up Go
-      uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0
+      uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # tag=v6.1.0
      with:
        go-version: ${{ steps.vars.outputs.go_version }}
-    - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # tag=v4.2.3
+    - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # tag=v4.3.0
      name: Restore go cache
      with:
        path: |
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index b736600bd0..587c369742 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -17,13 +17,13 @@ jobs:
     - name: Set env
      run: echo "RELEASE_TAG=${GITHUB_REF:10}" >> $GITHUB_ENV
     - name: checkout code
-      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2
+      uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # tag=v6.0.0
      with:
        fetch-depth: 0
     - name: Calculate go version
      run: echo "go_version=$(make go-version)" >> $GITHUB_ENV
     - name: Set up Go
-      uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0
+      uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # tag=v6.1.0
      with:
        go-version: ${{ env.go_version }}
     - name: generate release artifacts
@@ -37,7 +37,7 @@ jobs:
      env:
        GH_TOKEN: ${{ github.token }}
     - name: Release
-      uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # tag=v2.3.2
+      uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # tag=v2.5.0
      with:
        draft: true
        files: out/*
diff --git a/.github/workflows/security-scan.yaml b/.github/workflows/security-scan.yaml
index f5bdb42f97..8712f8294b 100644
--- a/.github/workflows/security-scan.yaml
+++ b/.github/workflows/security-scan.yaml
@@ -18,14 +18,14 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: Check out code
-      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2
+      uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # tag=v6.0.0
      with:
        ref: ${{ matrix.branch }}
     - name: Calculate go version
      id: vars
      run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT
     - name: Set up Go
-      uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0
+      uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # tag=v6.1.0
      with:
        go-version: ${{ steps.vars.outputs.go_version }}
     - name: Run verify security target
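All three workflows pin each action to a full commit SHA, with the corresponding tag recorded in a trailing comment. A hedged sketch of how such a pin can be checked or refreshed by hand (repository and tag taken from the diff above; for annotated tags, the peeled `^{}` line in the output is the commit to pin):

```bash
# List the commit behind the tag used in the pins above:
git ls-remote --tags https://github.com/actions/checkout v6.0.0
# The left-hand hash is what goes into:
#   uses: actions/checkout@<sha> # tag=v6.0.0
```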
diff --git a/Dockerfile b/Dockerfile
index ad6c6b4b9b..651b3ca31e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -14,7 +14,7 @@
 # Build the manager binary
 ARG GO_VERSION
-FROM golang:${GO_VERSION} AS builder
+FROM golang:${GO_VERSION:-1.24.11} AS builder
 WORKDIR /workspace

 # Run this with docker build --build_arg goproxy=$(go env GOPROXY) to override the goproxy
@@ -28,7 +28,7 @@ COPY go.sum go.sum
 # Cache deps before building and copying source so that we don't need to re-download as much
 # and so that source changes don't invalidate our downloaded layer
 RUN --mount=type=cache,target=/go/pkg/mod \
-  go mod download
+    go mod download

 # Copy the sources
 COPY ./ ./
@@ -40,10 +40,10 @@ ARG ldflags

 # Do not force rebuild of up-to-date packages (do not use -a) and use the compiler cache folder
 RUN --mount=type=cache,target=/root/.cache/go-build \
-  --mount=type=cache,target=/go/pkg/mod \
-  CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \
-  go build -ldflags "${ldflags} -extldflags '-static'" \
-  -o manager ${package}
+    --mount=type=cache,target=/go/pkg/mod \
+    CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \
+    go build -ldflags "${ldflags} -extldflags '-static'" \
+    -o manager ${package}

 # Production image
 FROM gcr.io/distroless/static:nonroot
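With the `:-` default added above, the builder stage resolves even when no `GO_VERSION` build arg is supplied. A small sketch of both invocations (the image tag is illustrative):

```bash
# Falls back to golang:1.24.11 via ${GO_VERSION:-1.24.11}:
docker build -t capo-manager:dev .
# Explicit override, as a Makefile-driven build would pass it:
docker build --build-arg GO_VERSION=1.24.11 -t capo-manager:dev .
```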
diff --git a/Makefile b/Makefile
index 48fcc2402b..39291b665c 100644
--- a/Makefile
+++ b/Makefile
@@ -27,7 +27,7 @@ unexport GOPATH
 TRACE ?= 0

 # Go
-GO_VERSION ?= 1.23.10
+GO_VERSION ?= 1.24.11

 # Directories.
 ARTIFACTS ?= $(REPO_ROOT)/_artifacts
@@ -69,6 +69,11 @@ GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint
 GOTESTSUM := $(TOOLS_BIN_DIR)/gotestsum
 KUSTOMIZE := $(TOOLS_BIN_DIR)/kustomize
 MOCKGEN := $(TOOLS_BIN_DIR)/mockgen
+OPENAPI_GEN := $(TOOLS_BIN_DIR)/openapi-gen
+APPLYCONFIGURATION_GEN := $(TOOLS_BIN_DIR)/applyconfiguration-gen
+CLIENT_GEN := $(TOOLS_BIN_DIR)/client-gen
+LISTER_GEN := $(TOOLS_BIN_DIR)/lister-gen
+INFORMER_GEN := $(TOOLS_BIN_DIR)/informer-gen
 RELEASE_NOTES := $(TOOLS_BIN_DIR)/release-notes
 SETUP_ENVTEST := $(TOOLS_BIN_DIR)/setup-envtest
 GEN_CRD_API_REFERENCE_DOCS := $(TOOLS_BIN_DIR)/gen-crd-api-reference-docs
@@ -317,8 +322,66 @@ generate-controller-gen: $(CONTROLLER_GEN)
 		object:headerFile=./hack/boilerplate/boilerplate.generatego.txt

 .PHONY: generate-codegen
-generate-codegen: generate-controller-gen
-	./hack/update-codegen.sh
+generate-codegen: generate-controller-gen $(OPENAPI_GEN) $(APPLYCONFIGURATION_GEN) $(CLIENT_GEN) $(LISTER_GEN) $(INFORMER_GEN)
+	@echo "** Generating OpenAPI definitions **"
+	# The package list includes:
+	# - CAPO's own API packages (v1alpha1, v1alpha7, v1beta1) that have // +k8s:openapi-gen= markers
+	# - Dependency packages from CAPI and k8s.io that are referenced by CAPO's APIs
+	# - Base k8s.io/apimachinery packages
+	$(OPENAPI_GEN) \
+		--go-header-file=./hack/boilerplate.go.txt \
+		--output-file=zz_generated.openapi.go \
+		--output-dir=./cmd/models-schema \
+		--output-pkg=main \
+		--report-filename=./api_violations.report \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1 \
+		sigs.k8s.io/cluster-api/api/v1beta1 \
+		k8s.io/api/core/v1 \
+		k8s.io/apimachinery/pkg/apis/meta/v1 \
+		k8s.io/apimachinery/pkg/runtime \
+		k8s.io/apimachinery/pkg/version
+	@echo "** Generating openapi.json **"
+	go run ./cmd/models-schema | jq > ./openapi.json
+	@echo "** Generating applyconfiguration code **"
+	$(APPLYCONFIGURATION_GEN) \
+		--go-header-file=./hack/boilerplate.go.txt \
+		--output-dir=./pkg/generated/applyconfiguration \
+		--output-pkg=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/applyconfiguration \
+		--openapi-schema=./openapi.json \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1
+	@echo "** Generating clientset code **"
+	$(CLIENT_GEN) \
+		--go-header-file=./hack/boilerplate.go.txt \
+		--output-dir=./pkg/generated/clientset \
+		--output-pkg=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/clientset \
+		--clientset-name=clientset \
+		--input-base=sigs.k8s.io/cluster-api-provider-openstack \
+		--apply-configuration-package=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/applyconfiguration \
+		--input=api/v1alpha1 \
+		--input=api/v1alpha7 \
+		--input=api/v1beta1
+	@echo "** Generating lister code **"
+	$(LISTER_GEN) \
+		--go-header-file=./hack/boilerplate.go.txt \
+		--output-dir=./pkg/generated/listers \
+		--output-pkg=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/listers \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1
+	@echo "** Generating informer code **"
+	$(INFORMER_GEN) \
+		--go-header-file=./hack/boilerplate.go.txt \
+		--output-dir=./pkg/generated/informers \
+		--output-pkg=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/informers \
+		--versioned-clientset-package=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/clientset/clientset \
+		--listers-package=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/listers \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1

 .PHONY: generate-conversion-gen
 generate-conversion-gen: $(CONVERSION_GEN)
@@ -517,6 +580,8 @@ templates/cluster-template-%.yaml: kustomize/v1beta1/% $(KUSTOMIZE) FORCE

 .PHONY: release-templates
 release-templates: $(RELEASE_DIR) templates ## Generate release templates
 	cp templates/cluster-template*.yaml $(RELEASE_DIR)/
+	cp templates/clusterclass*.yaml $(RELEASE_DIR)/
+	cp templates/image-template*.yaml $(RELEASE_DIR)/

 IMAGE_PATCH_DIR := $(ARTIFACTS)/image-patch
@@ -587,7 +652,7 @@ clean-release-git: ## Restores the git files usually modified during a release
 	git restore ./*manager_image_patch.yaml ./*manager_pull_policy.yaml

 .PHONY: verify
-verify: verify-boilerplate verify-modules verify-gen verify-govulncheck
+verify: verify-boilerplate verify-modules verify-gen

 .PHONY: verify-boilerplate
 verify-boilerplate:
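The new target chains the five generators in dependency order: openapi-gen produces `openapi.json`, which applyconfiguration-gen consumes, and client-gen, lister-gen, and informer-gen then build on the generated packages. A sketch of running it locally and inspecting the results (output locations follow the `--output-dir` flags above):

```bash
make generate-codegen
# Generated artifacts, per the flags in the recipe:
ls ./cmd/models-schema/zz_generated.openapi.go ./openapi.json
ls ./pkg/generated/applyconfiguration ./pkg/generated/clientset \
   ./pkg/generated/listers ./pkg/generated/informers
```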
diff --git a/OWNERS b/OWNERS
index ea64e2e7b8..af00278ed6 100644
--- a/OWNERS
+++ b/OWNERS
@@ -21,3 +21,5 @@ emeritus_approvers:
 - chrischdi
 - tobiasgiese
 - seanschneeweiss
+- jichenjc
+- mdbooth
diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index d2efce1b97..d5c4322787 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -21,7 +21,6 @@ aliases:
   cluster-api-openstack-maintainers:
   - emilienm
   - lentzi90
-  - mdbooth
   cluster-api-openstack-reviewers:
-  cluster-api-openstack-emeritus-maintainers:
-  - jichenjc
+  - bnallapeta
+  - smoshiur1237
diff --git a/cloudbuild.yaml b/cloudbuild.yaml
index 49ec67656a..23783a278d 100644
--- a/cloudbuild.yaml
+++ b/cloudbuild.yaml
@@ -4,15 +4,19 @@ options:
   substitution_option: ALLOW_LOOSE
   machineType: 'N1_HIGHCPU_8'
 steps:
-  - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20241229-5dc092c636'
-    entrypoint: make
-    env:
-    - DOCKER_CLI_EXPERIMENTAL=enabled
-    - TAG=$_GIT_TAG
-    - PULL_BASE_REF=$_PULL_BASE_REF
-    - DOCKER_BUILDKIT=1
-    args:
-    - release-staging
+# To check if the image can handle the build, you can try it like this:
+# docker run --rm -it -v $(pwd):/workspace gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:${TAG}
+# make clean # make sure we have something to build
+# make staging-manifests
+- name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20250116-2a05ea7e3d'
+  entrypoint: make
+  env:
+  - DOCKER_CLI_EXPERIMENTAL=enabled
+  - TAG=$_GIT_TAG
+  - PULL_BASE_REF=$_PULL_BASE_REF
+  - DOCKER_BUILDKIT=1
+  args:
+  - release-staging
 substitutions:
   # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
   # can be used as a substitution
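The new comment block documents how to smoke-test the builder image locally. Expanded into a runnable sketch (substituting the pinned image tag for `${TAG}`, and assuming the image's default entrypoint drops into a shell; the `make` targets are the ones named in the comment):

```bash
docker run --rm -it -v "$(pwd)":/workspace \
    gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20250116-2a05ea7e3d
# then, inside the container:
make clean              # make sure we have something to build
make staging-manifests
```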
diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml
index 9cf26134e4..36d4cc6e40 100644
--- a/config/webhook/kustomization.yaml
+++ b/config/webhook/kustomization.yaml
@@ -1,6 +1,3 @@
 resources:
 - manifests.yaml
 - service.yaml
-
-configurations:
-- kustomizeconfig.yaml
diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml
deleted file mode 100644
index 25e21e3c96..0000000000
--- a/config/webhook/kustomizeconfig.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-# the following config is for teaching kustomize where to look at when substituting vars.
-# It requires kustomize v2.1.0 or newer to work properly.
-nameReference:
-- kind: Service
-  version: v1
-  fieldSpecs:
-  - kind: MutatingWebhookConfiguration
-    group: admissionregistration.k8s.io
-    path: webhooks/clientConfig/service/name
-  - kind: ValidatingWebhookConfiguration
-    group: admissionregistration.k8s.io
-    path: webhooks/clientConfig/service/name
-
-namespace:
-- kind: MutatingWebhookConfiguration
-  group: admissionregistration.k8s.io
-  path: webhooks/clientConfig/service/namespace
-  create: true
-- kind: ValidatingWebhookConfiguration
-  group: admissionregistration.k8s.io
-  path: webhooks/clientConfig/service/namespace
-  create: true
-
-varReference:
-- path: metadata/annotations
diff --git a/docs/book/src/development/development.md b/docs/book/src/development/development.md
index 705c0bd4e1..f9632a4028 100644
--- a/docs/book/src/development/development.md
+++ b/docs/book/src/development/development.md
@@ -38,7 +38,7 @@ This document explains how to develop Cluster API Provider OpenStack (CAPO).
 Note that CAPO depends on ORC. No matter how you choose to work, you will need to deploy ORC in order to make CAPO functional:

 ```bash
-kubectl apply -f https://github.com/k-orc/openstack-resource-controller/releases/download/v1.0.0/install.yaml
+kubectl apply -f https://github.com/k-orc/openstack-resource-controller/releases/latest/download/install.yaml
 ```

 TL;DR: Here is a short version for how to develop with Tilt:
@@ -58,11 +58,12 @@ export RESOURCE_TYPE=...
 make tilt-up
 # Back in CAPO repo
 # Install ORC
-kubectl apply -f https://github.com/k-orc/openstack-resource-controller/releases/download/v1.0.0/install.yaml
+kubectl apply -f https://github.com/k-orc/openstack-resource-controller/releases/latest/download/install.yaml
 # Create secret with clouds.yaml (the file is created by create_devstack.sh)
 kubectl create secret generic dev-test-cloud-config --from-file=clouds.yaml
 # Add images to use in the tests
-clusterctl generate yaml --from templates/images-template.yaml | kubectl apply -f -
+clusterctl generate yaml --from templates/image-template-node.yaml | kubectl apply -f -
+clusterctl generate yaml --from templates/image-template-bastion.yaml | kubectl apply -f -
 ```

 At this point, you should be able to apply the `dev-test` ClusterClass and start creating/deleting `development` clusters through the Tilt UI.
@@ -95,10 +96,11 @@ After generating `infrastructure-components.yaml`, replace the `us.gcr.io/k8s-ar

 ## Automatically Adding Images to OpenStack

 Before you can create a Cluster, you will need a suitable image in OpenStack.
-There is a convenient template available in `templates/images-template.yaml` for this purpose.
+There are convenient templates available in `templates/image-template-*.yaml` for this purpose.
+For example:

 ```bash
-clusterctl generate yaml --from templates/images-template.yaml | kubectl apply -f -
+clusterctl generate yaml --from templates/image-template-node.yaml | kubectl apply -f -
 ```

 ## Testing Cluster Creation using the 'dev-test' ClusterClass with Tilt
diff --git a/go.mod b/go.mod
index a004f59945..304b928108 100644
--- a/go.mod
+++ b/go.mod
@@ -8,32 +8,32 @@ require (
 	github.com/google/go-cmp v0.7.0
 	github.com/google/gofuzz v1.2.0
 	github.com/google/uuid v1.6.0
-	github.com/gophercloud/gophercloud/v2 v2.7.0
+	github.com/gophercloud/gophercloud/v2 v2.9.0
 	github.com/gophercloud/utils/v2 v2.0.0-20241209100706-e3a3b7c07d26
-	github.com/hashicorp/go-version v1.7.0
+	github.com/hashicorp/go-version v1.8.0
 	github.com/k-orc/openstack-resource-controller v1.0.2
-	github.com/onsi/ginkgo/v2 v2.23.4
-	github.com/onsi/gomega v1.37.0
-	github.com/prometheus/client_golang v1.22.0
-	github.com/spf13/pflag v1.0.6
-	go.uber.org/mock v0.5.2
-	golang.org/x/crypto v0.39.0
-	golang.org/x/text v0.26.0
+	github.com/onsi/ginkgo/v2 v2.27.3
+	github.com/onsi/gomega v1.38.2
+	github.com/prometheus/client_golang v1.23.2
+	github.com/spf13/pflag v1.0.10
+	go.uber.org/mock v0.6.0
+	golang.org/x/crypto v0.41.0
+	golang.org/x/text v0.28.0
 	gopkg.in/ini.v1 v1.67.0
-	k8s.io/api v0.31.9
-	k8s.io/apiextensions-apiserver v0.31.9
-	k8s.io/apimachinery v0.31.9
-	k8s.io/client-go v0.31.9
-	k8s.io/code-generator v0.31.9
-	k8s.io/component-base v0.31.9
+	k8s.io/api v0.31.14
+	k8s.io/apiextensions-apiserver v0.31.14
+	k8s.io/apimachinery v0.31.14
+	k8s.io/client-go v0.31.14
+	k8s.io/code-generator v0.31.14
+	k8s.io/component-base v0.31.14
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340
 	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
-	sigs.k8s.io/cluster-api v1.9.8
-	sigs.k8s.io/cluster-api/test v1.9.8
+	sigs.k8s.io/cluster-api v1.9.11
+	sigs.k8s.io/cluster-api/test v1.9.11
 	sigs.k8s.io/controller-runtime v0.19.7
 	sigs.k8s.io/structured-merge-diff/v4 v4.7.0
-	sigs.k8s.io/yaml v1.4.0
+	sigs.k8s.io/yaml v1.6.0
 )

 require (
@@ -41,7 +41,7 @@
 	github.com/BurntSushi/toml v1.4.0 // indirect
 	github.com/MakeNowJust/heredoc v1.0.0 // indirect
 	github.com/Masterminds/goutils v1.1.1 // indirect
-	github.com/Masterminds/semver/v3 v3.3.0 // indirect
+	github.com/Masterminds/semver/v3 v3.4.0 // indirect
 	github.com/Masterminds/sprig/v3 v3.3.0 // indirect
 	github.com/Microsoft/go-winio v0.5.0 // indirect
 	github.com/NYTimes/gziphandler v1.1.1 // indirect
@@ -54,9 +54,9 @@
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/cloudflare/circl v1.3.7 // indirect
+	github.com/cloudflare/circl v1.6.1 // indirect
 	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/docker v27.3.1+incompatible // indirect
+	github.com/docker/docker v28.0.2+incompatible // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-units v0.4.0 // indirect
 	github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect
@@ -106,9 +106,9 @@
 	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.66.1 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/sagikazarmark/locafero v0.4.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
@@ -129,28 +129,30 @@
 	go.opentelemetry.io/otel/sdk v1.28.0 // indirect
 	go.opentelemetry.io/otel/trace v1.28.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
-	go.uber.org/automaxprocs v1.6.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
+	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
-	golang.org/x/mod v0.25.0 // indirect
-	golang.org/x/net v0.40.0 // indirect
-	golang.org/x/oauth2 v0.24.0 // indirect
-	golang.org/x/sync v0.15.0 // indirect
-	golang.org/x/sys v0.33.0 // indirect
-	golang.org/x/term v0.32.0 // indirect
+	golang.org/x/mod v0.27.0 // indirect
+	golang.org/x/net v0.43.0 // indirect
+	golang.org/x/oauth2 v0.30.0 // indirect
+	golang.org/x/sync v0.16.0 // indirect
+	golang.org/x/sys v0.35.0 // indirect
+	golang.org/x/term v0.34.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	golang.org/x/tools v0.33.0 // indirect
+	golang.org/x/tools v0.36.0 // indirect
+	golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
 	google.golang.org/grpc v1.65.1 // indirect
-	google.golang.org/protobuf v1.36.5 // indirect
+	google.golang.org/protobuf v1.36.8 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apiserver v0.31.9 // indirect
+	k8s.io/apiserver v0.31.14 // indirect
 	k8s.io/cluster-bootstrap v0.31.3 // indirect
 	k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
diff --git a/go.sum b/go.sum
index 83ee1624f2..9ea016fef2 100644
--- a/go.sum
+++ b/go.sum
@@ -8,8 +8,8 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ
 github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
 github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
 github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
-github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
 github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
 github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
 github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
@@ -36,14 +36,14 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
-github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
-github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
+github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
+github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0=
 github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
-github.com/coredns/corefile-migration v1.0.26 h1:xiiEkVB1Dwolb24pkeDUDBfygV9/XsOSq79yFCrhptY=
-github.com/coredns/corefile-migration v1.0.26/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
+github.com/coredns/corefile-migration v1.0.27 h1:WIIw5sU0LfGgoGnhdrYdVcto/aWmJoGA/C62iwkU0JM=
+github.com/coredns/corefile-migration v1.0.27/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
 github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
 github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
@@ -57,8 +57,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
-github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.0.2+incompatible h1:9BILleFwug5FSSqWBgVevgL3ewDJfWWWyZVqlDMttE8=
+github.com/docker/docker v28.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
@@ -81,6 +81,12 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs=
+github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
+github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
+github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
+github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
+github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
 github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -99,6 +105,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
 github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
+github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
+github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
 github.com/gofrs/uuid/v5 v5.3.0 h1:m0mUMr+oVYUdxpMLgSYCZiXe7PuVPnI94+OMeVBNedk=
 github.com/gofrs/uuid/v5 v5.3.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -133,8 +141,8 @@ github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+
 github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
-github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
+github.com/gophercloud/gophercloud/v2 v2.9.0 h1:Y9OMrwKF9EDERcHFSOTpf/6XGoAI0yOxmsLmQki4LPM=
+github.com/gophercloud/gophercloud/v2 v2.9.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
 github.com/gophercloud/utils/v2 v2.0.0-20241209100706-e3a3b7c07d26 h1:N65GYmx5LrMeYdeXcxMESDU+2pDyAOXlFNlHl7siUwM=
 github.com/gophercloud/utils/v2 v2.0.0-20241209100706-e3a3b7c07d26/go.mod h1:7SHUbtoiSYINNKgAVxse+PMhIio05IK7shHy8DVRaN0=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -147,8 +155,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
-github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
-github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
+github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
@@ -161,6 +169,8 @@ github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9q
 github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
+github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/k-orc/openstack-resource-controller v1.0.2 h1:WhBpyBpc5sIxALwC6Jyl6vtf0TZSlhJd6/1r6jiCTQc=
@@ -182,8 +192,12 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V
 github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
+github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
+github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -205,10 +219,10 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
-github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
-github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
-github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
+github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8=
+github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
+github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
+github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
@@ -222,18 +236,16 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
-github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
-github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
-github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
+github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
 github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
@@ -255,8 +267,8 @@ github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cA
 github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
 github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
-github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
 github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
 github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
@@ -272,10 +284,18 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
+github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
+github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
+github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
 github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
@@ -322,42 +342,44 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+
 go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
-go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
-go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
-go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
+go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
+go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
 go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
-golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
+golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
+golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
-golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
+golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
-golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
-golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
-golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
-golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -367,23 +389,27 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
-golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
-golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
+golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
+golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
-golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
-golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
+golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
+golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
+golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY=
+golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
+golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
+golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -398,8 +424,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
 google.golang.org/grpc v1.65.1 h1:toSN4j5/Xju+HVovfaY5g1YZVuJeHzQZhP8eJ0L0f1I=
 google.golang.org/grpc v1.65.1/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
+google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -418,22 +444,22 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
 gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
-k8s.io/api v0.31.9 h1:+gN4iZNccfr6y2EX28ZgcAq4yUKNZMhg2Jl72+2hoxQ=
-k8s.io/api v0.31.9/go.mod h1:+rao9hnuB9AHXVoqqwxPh493H91pte1ZhfJ6oz1qLJA=
-k8s.io/apiextensions-apiserver v0.31.9 h1:5U+Y7vvV+lVqOBjNmmTO42PxoQrp44yzXTHievxEhdY=
-k8s.io/apiextensions-apiserver v0.31.9/go.mod h1:tx/XA+SO6HhoXhXqvaeF5+iHlL7dF3wWACB6plC23M8=
-k8s.io/apimachinery v0.31.9 h1:sLGkHzsAfWVp55os8PlKw+eeIsB3IeVU1QLb3XKHyg8=
-k8s.io/apimachinery v0.31.9/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
-k8s.io/apiserver v0.31.9 h1:Ebc8HTjGjkkiW7c2I1yK3WCv2V37P1oikVEOiw14ZeE=
-k8s.io/apiserver v0.31.9/go.mod h1:Nl/v7YIM6KDVDFtJDdLWQZu2Px+75vrxlMntZSU7b74=
-k8s.io/client-go v0.31.9 h1:SZr3xiDPdGwKeVR+jMYYubk1gJXA/go3obJeG/1Q/to=
-k8s.io/client-go v0.31.9/go.mod h1:ZwfOkKABRm2zSNR3s9OkADeyt0zhF9F78tJNupZM8zM=
+k8s.io/api v0.31.14 h1:xYn/S/WFJsksI7dk/5uBRd3Umm/D8W5g7sRnd4csotA=
+k8s.io/api v0.31.14/go.mod h1:K8fvRey4z73RAuxBZCma7WtY8WFvkViYhfFLCMT4xgA=
+k8s.io/apiextensions-apiserver v0.31.14 h1:1KupD0PyU7CgiT/PiZPSgZhTCL2KGwvXd1ejGcxjEfg=
+k8s.io/apiextensions-apiserver v0.31.14/go.mod h1:Odk14fSl/zaciI8DRUSPMSH74UXtz4gfinw7zY7YHvE=
+k8s.io/apimachinery v0.31.14 h1:/eMIwjv+GFm6A/sSGlB1NupBU6wTDPhEWsju0Fj69kY=
+k8s.io/apimachinery v0.31.14/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/apiserver v0.31.14 h1:DORopWIu2qg7gmVyA9UUGGGmO1Rmnq5Oe+GmsKen3yo=
+k8s.io/apiserver v0.31.14/go.mod h1:q81QJuh85u/HN74pdw5Ci4EnrRmCOonZj9FvLwf8DWc=
+k8s.io/client-go v0.31.14 h1:d4/G0xfksNIbMWH7ghjzOwC5bTAwQ20gABTjZw7fLlQ=
+k8s.io/client-go v0.31.14/go.mod h1:0uRpRB7r5QwtsbxEngZPkbcIVoNdAQAPIcopgiXjhQc=
 k8s.io/cluster-bootstrap v0.31.3 h1:O1Yxk1bLaxZvmQCXLaJjj5iJD+lVMfJdRUuKgbUHPlA=
 k8s.io/cluster-bootstrap v0.31.3/go.mod h1:TI6TCsQQB4FfcryWgNO3SLXSKWBqHjx4DfyqSFwixj8=
-k8s.io/code-generator v0.31.9 h1:BHGXw8ZDNsZk0xAiBOmYxvLfSSayz1oSaBs4q3iWYkM=
-k8s.io/code-generator v0.31.9/go.mod h1:W7iHkUd4fWSs3lK39ab8T2Vy22HzRMkgjuS1mCW4KA8=
-k8s.io/component-base v0.31.9 h1:trlYOQMxtaSdEcGfQQ9BukKUS7wSIWqOrq4Jnk21hSk=
-k8s.io/component-base v0.31.9/go.mod h1:OKZS6U9o/vzKVcwv2zPdMiAWHWhyv5/wwv3aWB5eesI=
+k8s.io/code-generator v0.31.14 h1:Qn+Lo0jvH8Z7YlpWle3SjjkQAkDDS0BvefC6ldoRbuk=
+k8s.io/code-generator v0.31.14/go.mod h1:O1gjNfUL1q1FaoASAWQW6Iu2Taahark2McS+rBxv/Ic=
+k8s.io/component-base v0.31.14 h1:VNjBuEMmvlwL4twRlMmlaVmsodIRaNivXcZoAx1/x7Q=
+k8s.io/component-base v0.31.14/go.mod h1:9ogYcJBUdB4VQ/OMgInYVRScC9bguXxSEEZPsInY+uM=
 k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
 k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
@@ -444,19 +470,21 @@ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1
 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
-sigs.k8s.io/cluster-api v1.9.8 h1:VtgUzUgiE16d3P/XP7tIwPgkRXkdLvVj055o7wIQpaI=
-sigs.k8s.io/cluster-api v1.9.8/go.mod h1:6N73nqXbB1qTD3Z7zJc5WsRBen35JOflBdP73f23M2g=
-sigs.k8s.io/cluster-api/test v1.9.8 h1:WERh3yx0aHQRGoQdWZB7WvHY+xgOhWFxFzw6u9TGKXA=
-sigs.k8s.io/cluster-api/test v1.9.8/go.mod h1:YL2wANe8TFWFBka9CDkxjPj7KALqUtK+PtKa4ChNIok=
+sigs.k8s.io/cluster-api v1.9.11 h1:bWOKVdg9UDxCaLQE5E5nDKFpca63jlcgmSx2wun/2+Q=
+sigs.k8s.io/cluster-api v1.9.11/go.mod h1:7ieY929gex3urS4k9+s2hnA7OTLEpjftAjXV5hAVhsA=
+sigs.k8s.io/cluster-api/test v1.9.11 h1:2aEOb499Z7vVYl+PxIS/j01R9ycXteladgx0OXchFUM=
+sigs.k8s.io/cluster-api/test v1.9.11/go.mod h1:54bu5pn4TaNINcpZzKkLbIltjrxlSzXCJ7xN+0/xlks=
 sigs.k8s.io/controller-runtime v0.19.7 h1:DLABZfMr20A+AwCZOHhcbcu+TqBXnJZaVBri9K3EO48=
 sigs.k8s.io/controller-runtime v0.19.7/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/kind v0.25.0 h1:ugUvgesHKKA0yKmD6QtYTiEev+kPUpGxdTPbMGf8VTU=
 sigs.k8s.io/kind v0.25.0/go.mod h1:t7ueEpzPYJvHA8aeLtI52rtFftNgUYUaCwvxjk7phfw=
-sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016 h1:kXv6kKdoEtedwuqMmkqhbkgvYKeycVbC8+iPCP9j5kQ=
 sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
 sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
 sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
 sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/hack/ci/cloud-init/controller.yaml.tpl b/hack/ci/cloud-init/controller.yaml.tpl
index ace4054679..e89a448571 100644
--- a/hack/ci/cloud-init/controller.yaml.tpl
+++ b/hack/ci/cloud-init/controller.yaml.tpl
@@ -10,7 +10,7 @@
       # Enable Logging
       LOGFILE=/opt/stack/logs/stack.sh.log
       VERBOSE=True
-      LOG_COLOR=True
+      LOG_COLOR=False

       # Host tuning
       ENABLE_SYSCTL_MEM_TUNING="True"
@@ -44,7 +44,7 @@
       PUBLIC_BRIDGE_MTU=${MTU}
       ENABLE_CHASSIS_AS_GW="True"
       OVN_DBS_LOG_LEVEL="dbg"
-      Q_ML2_PLUGIN_MECHANISM_DRIVERS="ovn,logger"
+      Q_ML2_PLUGIN_MECHANISM_DRIVERS="ovn"
      OVN_L3_CREATE_PUBLIC_NETWORK="True"
      Q_AGENT="ovn"
diff --git a/hack/ci/cloud-init/worker.yaml.tpl b/hack/ci/cloud-init/worker.yaml.tpl
index 0a34b69a22..63fd48cb8a 100644
--- a/hack/ci/cloud-init/worker.yaml.tpl
+++ b/hack/ci/cloud-init/worker.yaml.tpl
@@ -9,7 +9,7 @@
       # Enable Logging
       LOGFILE=/opt/stack/logs/stack.sh.log
       VERBOSE=True
-      LOG_COLOR=True
+      LOG_COLOR=False

       # Host tuning
       ENABLE_SYSCTL_MEM_TUNING="True"
@@ -41,7 +41,7 @@
       PUBLIC_BRIDGE_MTU=${MTU}
       ENABLE_CHASSIS_AS_GW="False"
       OVN_DBS_LOG_LEVEL="dbg"
-      Q_ML2_PLUGIN_MECHANISM_DRIVERS="ovn,logger"
+      Q_ML2_PLUGIN_MECHANISM_DRIVERS="ovn"
      Q_AGENT="ovn"

      # WORKAROUND:
diff --git a/hack/ci/create_devstack.sh b/hack/ci/create_devstack.sh
index 60c8ce7f62..24987a9e10 100755
--- a/hack/ci/create_devstack.sh
+++ b/hack/ci/create_devstack.sh
@@ -31,7 +31,7 @@ source "${scriptdir}/${RESOURCE_TYPE}.sh"

 CLUSTER_NAME=${CLUSTER_NAME:-"capo-e2e"}

-OPENSTACK_RELEASE=${OPENSTACK_RELEASE:-"2024.2"}
+OPENSTACK_RELEASE=${OPENSTACK_RELEASE:-"2025.2"}
 OPENSTACK_ENABLE_HORIZON=${OPENSTACK_ENABLE_HORIZON:-"false"}

 # Devstack will create a provider network using this range
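Because the script assigns the release with `:-`, the bumped `2025.2` value is only a default. A hedged sketch of pinning a different DevStack series for a local run (script path from the diff header):

```bash
# Override the new 2025.2 default without editing the script:
OPENSTACK_RELEASE=2024.2 ./hack/ci/create_devstack.sh
```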
diff --git a/hack/tools/Makefile b/hack/tools/Makefile
index 7c5cac78f1..48a943a5cf 100644
--- a/hack/tools/Makefile
+++ b/hack/tools/Makefile
@@ -71,6 +71,26 @@ CONVERSION_GEN := $(BIN_DIR)/conversion-gen
 $(CONVERSION_GEN): go.mod go.sum | $(BIN_DIR)
 	go build -tags=tools -o $@ k8s.io/code-generator/cmd/conversion-gen

+OPENAPI_GEN := $(BIN_DIR)/openapi-gen
+$(OPENAPI_GEN): go.mod go.sum | $(BIN_DIR)
+	go build -tags=tools -o $@ k8s.io/kube-openapi/cmd/openapi-gen
+
+APPLYCONFIGURATION_GEN := $(BIN_DIR)/applyconfiguration-gen
+$(APPLYCONFIGURATION_GEN): go.mod go.sum | $(BIN_DIR)
+	go build -tags=tools -o $@ k8s.io/code-generator/cmd/applyconfiguration-gen
+
+CLIENT_GEN := $(BIN_DIR)/client-gen
+$(CLIENT_GEN): go.mod go.sum | $(BIN_DIR)
+	go build -tags=tools -o $@ k8s.io/code-generator/cmd/client-gen
+
+LISTER_GEN := $(BIN_DIR)/lister-gen
+$(LISTER_GEN): go.mod go.sum | $(BIN_DIR)
+	go build -tags=tools -o $@ k8s.io/code-generator/cmd/lister-gen
+
+INFORMER_GEN := $(BIN_DIR)/informer-gen
+$(INFORMER_GEN): go.mod go.sum | $(BIN_DIR)
+	go build -tags=tools -o $@ k8s.io/code-generator/cmd/informer-gen
+
 ENVSUBST := $(BIN_DIR)/envsubst
 $(ENVSUBST): go.mod go.sum | $(BIN_DIR) # Build envsubst from tools folder.
 	go build -tags=tools -o $@ github.com/a8m/envsubst/cmd/envsubst
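Each new target builds the upstream k8s.io/code-generator (or kube-openapi) command into `$(BIN_DIR)` with the `tools` build tag. A sketch of building one on demand, assuming `BIN_DIR` resolves to `bin` inside `hack/tools` (as the target paths suggest):

```bash
cd hack/tools
make bin/openapi-gen bin/client-gen
./bin/client-gen --help
```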
diff --git a/hack/tools/go.mod b/hack/tools/go.mod
index 38c3d91412..0d096f4336 100644
--- a/hack/tools/go.mod
+++ b/hack/tools/go.mod
@@ -6,17 +6,18 @@ require (
 	github.com/a8m/envsubst v1.4.2
 	github.com/ahmetb/gen-crd-api-reference-docs v0.3.1-0.20220420215017-3f29e6853552
 	github.com/itchyny/gojq v0.12.17
-	github.com/onsi/ginkgo/v2 v2.23.4
-	go.uber.org/mock v0.5.2
-	k8s.io/code-generator v0.31.9
+	github.com/onsi/ginkgo/v2 v2.27.3
+	go.uber.org/mock v0.6.0
+	k8s.io/code-generator v0.31.14
 	sigs.k8s.io/cluster-api-provider-openstack v0.0.0
 	sigs.k8s.io/cluster-api/hack/tools v0.0.0-20221129083400-679ae3e9e6b6
 	sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230926180527-c93e2abcb28e
 	sigs.k8s.io/controller-tools v0.16.5
-	sigs.k8s.io/kustomize/kustomize/v5 v5.6.0
+	sigs.k8s.io/kustomize/kustomize/v5 v5.5.0
 )

 require (
+	github.com/Masterminds/semver/v3 v3.4.0 // indirect
 	github.com/NYTimes/gziphandler v1.1.1 // indirect
 	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
@@ -52,10 +53,10 @@ require (
 	github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/gophercloud/gophercloud/v2 v2.7.0 // indirect
+	github.com/gophercloud/gophercloud/v2 v2.9.0 // indirect
 	github.com/gophercloud/utils/v2 v2.0.0-20241209100706-e3a3b7c07d26 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
-	github.com/hashicorp/go-version v1.7.0 // indirect
+	github.com/hashicorp/go-version v1.8.0 // indirect
 	github.com/imdario/mergo v0.3.15 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/itchyny/timefmt-go v0.1.6 // indirect
@@ -72,18 +73,18 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/onsi/gomega v1.37.0 // indirect
+	github.com/onsi/gomega v1.38.2 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_golang v1.22.0 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/prometheus/client_golang v1.23.2 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.66.1 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sergi/go-diff v1.2.0 // indirect
 	github.com/spf13/afero v1.11.0 // indirect
 	github.com/spf13/cobra v1.8.1 // indirect
-	github.com/spf13/pflag v1.0.6 // indirect
+	github.com/spf13/pflag v1.0.10 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xlab/treeprint v1.2.0 // indirect
@@ -95,52 +96,54 @@ require (
 	go.opentelemetry.io/otel/sdk v1.28.0 // indirect
 	go.opentelemetry.io/otel/trace v1.28.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
-	go.uber.org/automaxprocs v1.6.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
+	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
-	golang.org/x/mod v0.25.0 // indirect
-	golang.org/x/net v0.40.0 // indirect
-	golang.org/x/oauth2 v0.24.0 // indirect
-	golang.org/x/sync v0.15.0 // indirect
-	golang.org/x/sys v0.33.0 // indirect
-	golang.org/x/term v0.32.0 // indirect
-	golang.org/x/text v0.26.0 // indirect
+	golang.org/x/mod v0.27.0 // indirect
+	golang.org/x/net v0.43.0 // indirect
+	golang.org/x/oauth2 v0.30.0 // indirect
+	golang.org/x/sync v0.16.0 // indirect
+	golang.org/x/sys v0.35.0 // indirect
+	golang.org/x/term v0.34.0 // indirect
+	golang.org/x/text v0.28.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	golang.org/x/tools v0.33.0 // indirect
+	golang.org/x/tools v0.36.0 // indirect
+	golang.org/x/tools/go/expect v0.1.0-deprecated // indirect
 	golang.org/x/tools/go/vcs v0.1.0-deprecated // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
 	google.golang.org/grpc v1.65.1 // indirect
-	google.golang.org/protobuf v1.36.5 // indirect
+	google.golang.org/protobuf v1.36.8 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/api v0.31.9 // indirect
-	k8s.io/apiextensions-apiserver v0.31.9 // indirect
-	k8s.io/apimachinery v0.31.9 // indirect
-	k8s.io/apiserver v0.31.9 // indirect
-	k8s.io/client-go v0.31.9 // indirect
+	k8s.io/api v0.31.14 // indirect
+	k8s.io/apiextensions-apiserver v0.31.14 // indirect
+	k8s.io/apimachinery v0.31.14 // indirect
+	k8s.io/apiserver v0.31.14 // indirect
+	k8s.io/client-go v0.31.14 // indirect
 	k8s.io/cluster-bootstrap v0.31.3 // indirect
-	k8s.io/component-base v0.31.9 // indirect
+	k8s.io/component-base v0.31.14 // indirect
 	k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9 // indirect
 	k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 // indirect
 	k8s.io/klog v0.2.0 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect
+	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
 	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
-	sigs.k8s.io/cluster-api v1.9.8 // indirect
+	sigs.k8s.io/cluster-api v1.9.11 // indirect
 	sigs.k8s.io/controller-runtime v0.19.7 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d // indirect
-	sigs.k8s.io/kustomize/api v0.19.0 // indirect
-	sigs.k8s.io/kustomize/cmd/config v0.19.0 // indirect
-	sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
+	sigs.k8s.io/kustomize/api v0.18.0 // indirect
+	sigs.k8s.io/kustomize/cmd/config v0.15.0 // indirect
+	sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
-	sigs.k8s.io/yaml v1.4.0 // indirect
+	sigs.k8s.io/yaml v1.6.0 // indirect
 )

 replace sigs.k8s.io/cluster-api-provider-openstack => ../..
diff --git a/hack/tools/go.sum b/hack/tools/go.sum
index a80bbbcfce..3dec2423aa 100644
--- a/hack/tools/go.sum
+++ b/hack/tools/go.sum
@@ -4,8 +4,8 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ
 github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
 github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
 github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
-github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
 github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
 github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
 github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
@@ -34,12 +34,12 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
-github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
+github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
+github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
 github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0=
 github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
-github.com/coredns/corefile-migration v1.0.26 h1:xiiEkVB1Dwolb24pkeDUDBfygV9/XsOSq79yFCrhptY=
-github.com/coredns/corefile-migration v1.0.26/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
+github.com/coredns/corefile-migration v1.0.27 h1:WIIw5sU0LfGgoGnhdrYdVcto/aWmJoGA/C62iwkU0JM=
+github.com/coredns/corefile-migration v1.0.27/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
 github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
 github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
@@ -53,8 +53,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
-github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.0.2+incompatible h1:9BILleFwug5FSSqWBgVevgL3ewDJfWWWyZVqlDMttE8=
+github.com/docker/docker v28.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
@@ -77,6 +77,12 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs=
+github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
+github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
+github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
+github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
+github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
 github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
 github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
@@ -99,6 +105,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
 github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
+github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
+github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
 github.com/gofrs/uuid/v5 v5.3.0 h1:m0mUMr+oVYUdxpMLgSYCZiXe7PuVPnI94+OMeVBNedk=
 github.com/gofrs/uuid/v5 v5.3.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -133,8 +141,8 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E= -github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= +github.com/gophercloud/gophercloud/v2 v2.9.0 h1:Y9OMrwKF9EDERcHFSOTpf/6XGoAI0yOxmsLmQki4LPM= +github.com/gophercloud/gophercloud/v2 v2.9.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= github.com/gophercloud/utils/v2 v2.0.0-20241209100706-e3a3b7c07d26 h1:N65GYmx5LrMeYdeXcxMESDU+2pDyAOXlFNlHl7siUwM= github.com/gophercloud/utils/v2 v2.0.0-20241209100706-e3a3b7c07d26/go.mod h1:7SHUbtoiSYINNKgAVxse+PMhIio05IK7shHy8DVRaN0= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -147,8 +155,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= +github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= @@ -165,6 +173,8 @@ github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9q github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= +github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/k-orc/openstack-resource-controller v1.0.2 h1:WhBpyBpc5sIxALwC6Jyl6vtf0TZSlhJd6/1r6jiCTQc= @@ -188,6 +198,8 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= +github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -195,6 +207,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= +github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -218,10 +232,10 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= -github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= -github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= -github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8= +github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= @@ -235,21 +249,19 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod 
h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -275,8 +287,8 @@ github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cA github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= @@ -292,10 +304,18 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tidwall/gjson v1.18.0 
h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= @@ -342,53 +362,55 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= -go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= -go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= -golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= -golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text 
v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -396,8 +418,12 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= -golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= +golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/tools/go/vcs v0.1.0-deprecated h1:cOIJqWBl99H1dH5LWizPa+0ImeeJq3t3cJjaeOWUAL4= golang.org/x/tools/go/vcs v0.1.0-deprecated/go.mod h1:zUrvATBAvEI9535oC0yWYsLsHIV4Z7g63sNPVMtuBy8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -414,8 +440,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.65.1 h1:toSN4j5/Xju+HVovfaY5g1YZVuJeHzQZhP8eJ0L0f1I= google.golang.org/grpc v1.65.1/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -438,22 +464,22 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.31.9 h1:+gN4iZNccfr6y2EX28ZgcAq4yUKNZMhg2Jl72+2hoxQ= -k8s.io/api v0.31.9/go.mod h1:+rao9hnuB9AHXVoqqwxPh493H91pte1ZhfJ6oz1qLJA= -k8s.io/apiextensions-apiserver v0.31.9 h1:5U+Y7vvV+lVqOBjNmmTO42PxoQrp44yzXTHievxEhdY= 
-k8s.io/apiextensions-apiserver v0.31.9/go.mod h1:tx/XA+SO6HhoXhXqvaeF5+iHlL7dF3wWACB6plC23M8= -k8s.io/apimachinery v0.31.9 h1:sLGkHzsAfWVp55os8PlKw+eeIsB3IeVU1QLb3XKHyg8= -k8s.io/apimachinery v0.31.9/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/apiserver v0.31.9 h1:Ebc8HTjGjkkiW7c2I1yK3WCv2V37P1oikVEOiw14ZeE= -k8s.io/apiserver v0.31.9/go.mod h1:Nl/v7YIM6KDVDFtJDdLWQZu2Px+75vrxlMntZSU7b74= -k8s.io/client-go v0.31.9 h1:SZr3xiDPdGwKeVR+jMYYubk1gJXA/go3obJeG/1Q/to= -k8s.io/client-go v0.31.9/go.mod h1:ZwfOkKABRm2zSNR3s9OkADeyt0zhF9F78tJNupZM8zM= +k8s.io/api v0.31.14 h1:xYn/S/WFJsksI7dk/5uBRd3Umm/D8W5g7sRnd4csotA= +k8s.io/api v0.31.14/go.mod h1:K8fvRey4z73RAuxBZCma7WtY8WFvkViYhfFLCMT4xgA= +k8s.io/apiextensions-apiserver v0.31.14 h1:1KupD0PyU7CgiT/PiZPSgZhTCL2KGwvXd1ejGcxjEfg= +k8s.io/apiextensions-apiserver v0.31.14/go.mod h1:Odk14fSl/zaciI8DRUSPMSH74UXtz4gfinw7zY7YHvE= +k8s.io/apimachinery v0.31.14 h1:/eMIwjv+GFm6A/sSGlB1NupBU6wTDPhEWsju0Fj69kY= +k8s.io/apimachinery v0.31.14/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apiserver v0.31.14 h1:DORopWIu2qg7gmVyA9UUGGGmO1Rmnq5Oe+GmsKen3yo= +k8s.io/apiserver v0.31.14/go.mod h1:q81QJuh85u/HN74pdw5Ci4EnrRmCOonZj9FvLwf8DWc= +k8s.io/client-go v0.31.14 h1:d4/G0xfksNIbMWH7ghjzOwC5bTAwQ20gABTjZw7fLlQ= +k8s.io/client-go v0.31.14/go.mod h1:0uRpRB7r5QwtsbxEngZPkbcIVoNdAQAPIcopgiXjhQc= k8s.io/cluster-bootstrap v0.31.3 h1:O1Yxk1bLaxZvmQCXLaJjj5iJD+lVMfJdRUuKgbUHPlA= k8s.io/cluster-bootstrap v0.31.3/go.mod h1:TI6TCsQQB4FfcryWgNO3SLXSKWBqHjx4DfyqSFwixj8= -k8s.io/code-generator v0.31.9 h1:BHGXw8ZDNsZk0xAiBOmYxvLfSSayz1oSaBs4q3iWYkM= -k8s.io/code-generator v0.31.9/go.mod h1:W7iHkUd4fWSs3lK39ab8T2Vy22HzRMkgjuS1mCW4KA8= -k8s.io/component-base v0.31.9 h1:trlYOQMxtaSdEcGfQQ9BukKUS7wSIWqOrq4Jnk21hSk= -k8s.io/component-base v0.31.9/go.mod h1:OKZS6U9o/vzKVcwv2zPdMiAWHWhyv5/wwv3aWB5eesI= +k8s.io/code-generator v0.31.14 h1:Qn+Lo0jvH8Z7YlpWle3SjjkQAkDDS0BvefC6ldoRbuk= +k8s.io/code-generator v0.31.14/go.mod h1:O1gjNfUL1q1FaoASAWQW6Iu2Taahark2McS+rBxv/Ic= +k8s.io/component-base v0.31.14 h1:VNjBuEMmvlwL4twRlMmlaVmsodIRaNivXcZoAx1/x7Q= +k8s.io/component-base v0.31.14/go.mod h1:9ogYcJBUdB4VQ/OMgInYVRScC9bguXxSEEZPsInY+uM= k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9 h1:1bLA4Agvs1DILmc+q2Bbcqjx6jOHO7YEFA+G+0aTZoc= k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= @@ -463,18 +489,18 @@ k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 
h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/cluster-api v1.9.8 h1:VtgUzUgiE16d3P/XP7tIwPgkRXkdLvVj055o7wIQpaI= -sigs.k8s.io/cluster-api v1.9.8/go.mod h1:6N73nqXbB1qTD3Z7zJc5WsRBen35JOflBdP73f23M2g= +sigs.k8s.io/cluster-api v1.9.11 h1:bWOKVdg9UDxCaLQE5E5nDKFpca63jlcgmSx2wun/2+Q= +sigs.k8s.io/cluster-api v1.9.11/go.mod h1:7ieY929gex3urS4k9+s2hnA7OTLEpjftAjXV5hAVhsA= sigs.k8s.io/cluster-api/hack/tools v0.0.0-20221129083400-679ae3e9e6b6 h1:YF+g/Mr0DF+R0q0tnooUWUxjZ0TtDniMj0fgSh/HA6A= sigs.k8s.io/cluster-api/hack/tools v0.0.0-20221129083400-679ae3e9e6b6/go.mod h1:7luenhlsUTb9obnAferuDFEvhtITw7JjHpXkiDmCmKY= -sigs.k8s.io/cluster-api/test v1.9.8 h1:WERh3yx0aHQRGoQdWZB7WvHY+xgOhWFxFzw6u9TGKXA= -sigs.k8s.io/cluster-api/test v1.9.8/go.mod h1:YL2wANe8TFWFBka9CDkxjPj7KALqUtK+PtKa4ChNIok= +sigs.k8s.io/cluster-api/test v1.9.11 h1:2aEOb499Z7vVYl+PxIS/j01R9ycXteladgx0OXchFUM= +sigs.k8s.io/cluster-api/test v1.9.11/go.mod h1:54bu5pn4TaNINcpZzKkLbIltjrxlSzXCJ7xN+0/xlks= sigs.k8s.io/controller-runtime v0.19.7 h1:DLABZfMr20A+AwCZOHhcbcu+TqBXnJZaVBri9K3EO48= sigs.k8s.io/controller-runtime v0.19.7/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230926180527-c93e2abcb28e h1:xYNzzoK+cwgBnaRqrYFLQCSwMAYcR6a06gf3FJ369Kw= @@ -487,18 +513,20 @@ sigs.k8s.io/kind v0.25.0 h1:ugUvgesHKKA0yKmD6QtYTiEev+kPUpGxdTPbMGf8VTU= sigs.k8s.io/kind v0.25.0/go.mod h1:t7ueEpzPYJvHA8aeLtI52rtFftNgUYUaCwvxjk7phfw= sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d h1:KLiQzLW3RZJR19+j4pw2h5iioyAyqCkDBEAFdnGa3N8= sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d/go.mod h1:NRdZafr4zSCseLQggdvIMXa7umxf+Q+PJzrj3wFwiGE= -sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= -sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o= -sigs.k8s.io/kustomize/cmd/config v0.19.0 h1:D3uASwjHWHmNiEHu3pPJBJMBIsb+auFvHrHql3HAarU= -sigs.k8s.io/kustomize/cmd/config v0.19.0/go.mod h1:29Vvdl26PidPLUDi7nfjYa/I0wHBkwCZp15Nlcc4y98= -sigs.k8s.io/kustomize/kustomize/v5 v5.6.0 h1:MWtRRDWCwQEeW2rnJTqJMuV6Agy56P53SkbVoJpN7wA= -sigs.k8s.io/kustomize/kustomize/v5 v5.6.0/go.mod h1:XuuZiQF7WdcvZzEYyNww9A0p3LazCKeJmCjeycN8e1I= -sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= -sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= -sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016 h1:kXv6kKdoEtedwuqMmkqhbkgvYKeycVbC8+iPCP9j5kQ= +sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= +sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= +sigs.k8s.io/kustomize/cmd/config v0.15.0 h1:WkdY8V2+8J+W00YbImXa2ke9oegfrHH79e+kywW7EdU= +sigs.k8s.io/kustomize/cmd/config v0.15.0/go.mod h1:Jq57b0nPaoYUlOqg//0JtAh6iibboqMcfbtCYoWPM00= +sigs.k8s.io/kustomize/kustomize/v5 v5.5.0 h1:o1mtt6vpxsxDYaZKrw3BnEtc+pAjLz7UffnIvHNbvW0= +sigs.k8s.io/kustomize/kustomize/v5 v5.5.0/go.mod h1:AeFCmgCrXzmvjWWaeZCyBp6XzG1Y0w1svYus8GhJEOE= +sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= +sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 
h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/hack/tools/vendor/github.com/Masterminds/semver/v3/.gitignore b/hack/tools/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 0000000000..6b061e6174 --- /dev/null +++ b/hack/tools/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/hack/tools/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/hack/tools/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 0000000000..fbc6332592 --- /dev/null +++ b/hack/tools/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,27 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - misspell + - govet + - staticcheck + - errcheck + - unparam + - ineffassign + - nakedret + - gocyclo + - dupl + - goimports + - revive + - gosec + - gosimple + - typecheck + - unused + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 600 diff --git a/hack/tools/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/hack/tools/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 0000000000..fabe5e43dc --- /dev/null +++ b/hack/tools/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,268 @@ +# Changelog + +## 3.4.0 (2025-06-27) + +### Added + +- #268: Added property to Constraints to include prereleases for Check and Validate + +### Changed + +- #263: Updated Go testing for 1.24, 1.23, and 1.22 +- #269: Updated the error message handling for message case and wrapping errors +- #266: Restore the ability to have leading 0's when parsing with NewVersion. + Opt-out of this by setting CoerceNewVersion to false. + +### Fixed + +- #257: Fixed the CodeQL link (thanks @dmitris) +- #262: Restored detailed errors when failed to parse with NewVersion. Opt-out + of this by setting DetailedNewVersionErrors to false for faster performance. +- #267: Handle pre-releases for an "and" group if one constraint includes them + +## 3.3.1 (2024-11-19) + +### Fixed + +- #253: Fix for allowing some version that were invalid + +## 3.3.0 (2024-08-27) + +### Added + +- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser) +- #213: nil version equality checking (thanks @KnutZuidema) + +### Changed + +- #241: Simplify StrictNewVersion parsing (thanks @grosser) +- Testing support up through Go 1.23 +- Minimum version set to 1.21 as this is what's tested now +- Fuzz testing now supports caching + +## 3.2.1 (2023-04-10) + +### Changed + +- #198: Improved testing around pre-release names +- #200: Improved code scanning with addition of CodeQL +- #201: Testing now includes Go 1.20. Go 1.17 has been dropped +- #202: Migrated Fuzz testing to Go built-in Fuzzing. 
CI runs daily
+- #203: Docs updated for security details
+
+### Fixed
+
+- #199: Fixed issue with range transformations
+
+## 3.2.0 (2022-11-28)
+
+### Added
+
+- #190: Added text marshaling and unmarshaling
+- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg)
+- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker)
+- #179: Added New() version constructor (thanks @kazhuravlev)
+
+### Changed
+
+- #182/#183: Updated CI testing setup
+
+### Fixed
+
+- #186: Fixing issue where validation of constraint section gave false positives
+- #176: Fix constraints check with *-0 (thanks @mtt0)
+- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni)
+- #161: Fixed godoc (thanks @afirth)
+
+## 3.1.1 (2020-11-23)
+
+### Fixed
+
+- #158: Fixed issue with generated regex operation order that could cause a problem
+
+## 3.1.0 (2020-04-15)
+
+### Added
+
+- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah)
+
+### Changed
+
+- #148: More accurate validation messages on constraints
+
+## 3.0.3 (2019-12-13)
+
+### Fixed
+
+- #141: Fixed issue with <= comparison
+
+## 3.0.2 (2019-11-14)
+
+### Fixed
+
+- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos)
+
+## 3.0.1 (2019-09-13)
+
+### Fixed
+
+- #125: Fixes issue with module path for v3
+
+## 3.0.0 (2019-09-12)
+
+This is a major release of the semver package which includes API changes. The Go
+API is compatible with ^1. The Go API was not changed because many people are using
+`go get` without Go modules for their applications and API breaking changes cause
+errors which we have or would need to support.
+
+The changes in this release are the handling based on the data passed into the
+functions. These are described in the added and changed sections below.
+
+### Added
+
+- StrictNewVersion function. This is similar to NewVersion but will return an
+  error if the version passed in is not a strict semantic version. For example,
+  1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly
+  speaking semantic versions. This function is faster, performs fewer operations,
+  and uses fewer allocations than NewVersion.
+- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint.
+  The Makefile contains the operations used. For more information you can start
+  on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing
+- Now using Go modules
+
+### Changed
+
+- NewVersion has proper prerelease and metadata validation with error messages
+  to signal an issue with either of them
+- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the
+  version is >=1 the ^ range works the same as v1. For major versions of 0 the
+  rules have changed. The minor version is treated as the stable version unless
+  a patch is specified and then it is equivalent to =. One difference from npm/js
+  is that prereleases there are only to a specific version (e.g. 1.2.3).
+  Prereleases here look over multiple versions and follow semantic version
+  ordering rules. This pattern now follows along with the expected and requested
+  handling of this package by numerous users.
+
+## 1.5.0 (2019-09-11)
+
+### Added
+
+- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
+
+### Changed
+
+- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
+- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
+- #72: Adding docs comment pointing to vert for a cli
+- #71: Update the docs on pre-release comparator handling
+- #89: Test with new go versions (thanks @thedevsaddam)
+- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
+
+### Fixed
+
+- #78: Fix unchecked error in example code (thanks @ravron)
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+- #97: Fixed copyright file for proper display on GitHub
+- #107: Fix handling prerelease when sorting alphanum and num
+- #109: Fixed where Validate sometimes returns wrong message on error
+
+## 1.4.2 (2018-04-10)
+
+### Changed
+
+- #72: Updated the docs to point to vert for a console application
+- #71: Update the docs on pre-release comparator handling
+
+### Fixed
+
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+
+## 1.4.1 (2018-04-02)
+
+### Fixed
+
+- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
+
+## 1.4.0 (2017-10-04)
+
+### Changed
+
+- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
+
+## 1.3.1 (2017-07-10)
+
+### Fixed
+
+- Fixed #57: number comparisons in prerelease sometimes inaccurate
+
+## 1.3.0 (2017-05-02)
+
+### Added
+
+- #45: Added json (un)marshaling support (thanks @mh-cbon)
+- Stability marker. See https://masterminds.github.io/stability/
+
+### Fixed
+
+- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
+
+### Changed
+
+- #55: The godoc icon moved from png to svg
+
+## 1.2.3 (2017-04-03)
+
+### Fixed
+
+- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
+
+## Release 1.2.2 (2016-12-13)
+
+### Fixed
+
+- #34: Fixed issue where hyphen range was not working with pre-release parsing.
+
+## Release 1.2.1 (2016-11-28)
+
+### Fixed
+
+- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
+  properly.
+
+## Release 1.2.0 (2016-11-04)
+
+### Added
+
+- #20: Added MustParse function for versions (thanks @adamreese)
+- #15: Added increment methods on versions (thanks @mh-cbon)
+
+### Fixed
+
+- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
+  might not satisfy the intended compatibility. The change here ignores pre-releases
+  on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
+  constraint. For example, `^1.2.3` will ignore pre-releases while
+  `^1.2.3-alpha` will include them.
+
+## Release 1.1.1 (2016-06-30)
+
+### Changed
+
+- Issue #9: Speed up version comparison performance (thanks @sdboyer)
+- Issue #8: Added benchmarks (thanks @sdboyer)
+- Updated Go Report Card URL to new location
+- Updated Readme to add code snippet formatting (thanks @mh-cbon)
+- Updating tagging to v[SemVer] structure for compatibility with other tools.
+
+## Release 1.1.0 (2016-03-11)
+
+- Issue #2: Implemented validation to provide reasons a version failed a
+  constraint.
+
+## Release 1.0.1 (2015-12-31)
+
+- Fixed #1: * constraint failing on valid versions.
+ +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/openshift/vendor/go.uber.org/automaxprocs/LICENSE b/hack/tools/vendor/github.com/Masterminds/semver/v3/LICENSE.txt similarity index 93% rename from openshift/vendor/go.uber.org/automaxprocs/LICENSE rename to hack/tools/vendor/github.com/Masterminds/semver/v3/LICENSE.txt index 20dcf51d96..9ff7da9c48 100644 --- a/openshift/vendor/go.uber.org/automaxprocs/LICENSE +++ b/hack/tools/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2017 Uber Technologies, Inc. +Copyright (C) 2014-2019, Matt Butcher and Matt Farina Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -16,4 +16,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file +THE SOFTWARE. diff --git a/hack/tools/vendor/github.com/Masterminds/semver/v3/Makefile b/hack/tools/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 0000000000..9ca87a2c79 --- /dev/null +++ b/hack/tools/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,31 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: + @echo "==> Running Fuzz Tests" + go env GOCACHE + go test -fuzz=FuzzNewVersion -fuzztime=15s . + go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . + go test -fuzz=FuzzNewConstraint -fuzztime=15s . + +$(GOLANGCI_LINT): + # Install golangci-lint. The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2 diff --git a/hack/tools/vendor/github.com/Masterminds/semver/v3/README.md b/hack/tools/vendor/github.com/Masterminds/semver/v3/README.md new file mode 100644 index 0000000000..2f56c676a5 --- /dev/null +++ b/hack/tools/vendor/github.com/Masterminds/semver/v3/README.md @@ -0,0 +1,274 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +## Package Versions + +Note, import `github.com/Masterminds/semver/v3` to use the latest version. 
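+
+A minimal sketch of that import path in use, assuming only the `NewVersion`
+and `LessThan` API described in the sections below; the versions are
+illustrative:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/Masterminds/semver/v3"
+)
+
+func main() {
+	// NewVersion tolerates a leading "v" and coerces it away.
+	a, err := semver.NewVersion("v1.2.3")
+	if err != nil {
+		panic(err)
+	}
+	b, err := semver.NewVersion("1.3.0")
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(a.LessThan(b)) // true: 1.2.3 sorts before 1.3.0
+}
+```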
+
+There are three major versions of the `semver` package.
+
+* 3.x.x is the stable and active version. This version is focused on constraint
+  compatibility for range handling in other tools from other languages. It has
+  a similar API to the v1 releases. The development of this version is on the master
+  branch. The documentation for this version is below.
+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
+  no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
+  There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
+* 1.x.x is the original release. It is no longer maintained. You should use the
+  v3 release instead. You can read the documentation for the 1.x.x release
+  [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+
+## Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form. A short sketch of the two parsers follows this section.
+
+There are package level variables that affect how `NewVersion` handles parsing.
+
+- `CoerceNewVersion` is `true` by default. When set to `true` it coerces non-compliant
+  versions into SemVer. For example, allowing a leading 0 in a major, minor, or patch
+  part. This enables the use of CalVer in versions even when not compliant with SemVer.
+  When set to `false` less coercion work is done.
+- `DetailedNewVersionErrors` provides more detailed errors. It only has an effect when
+  `CoerceNewVersion` is set to `false`. When `DetailedNewVersionErrors` is set to `true`
+  it can provide some more insight into why a version is invalid. Setting
+  `DetailedNewVersionErrors` to `false` is faster but provides less detailed error
+  messages if a version fails to parse.
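+
+A short sketch of the difference between the two parsers, assuming the
+`Original()` accessor for the original string mentioned above; the output
+comments are illustrative:
+
+```go
+// StrictNewVersion rejects anything that is not a full SemVer triple.
+if _, err := semver.StrictNewVersion("v1.2"); err != nil {
+	fmt.Println("rejected:", err)
+}
+
+// NewVersion coerces the same input into a valid semantic version.
+v, err := semver.NewVersion("v1.2")
+if err != nil {
+	panic(err)
+}
+fmt.Println(v.String(), v.Original()) // 1.2.0 v1.2
+```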
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+	v, err := semver.NewVersion(r)
+	if err != nil {
+		t.Errorf("Error parsing version: %s", err)
+	}
+
+	vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+   and others it will follow the specification and always include pre-releases
+   within the comparison. It will provide an answer that is valid with the
+   comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+   different set of rules that are common for ranges with tools like npm/js
+   and Rust/Cargo. This includes considering pre-releases to be invalid if the
+   range does not include one. If you want to have it include pre-releases a
+   simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+   ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+	// Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+	// Handle version not being parsable.
+}
+// Check if the version meets the constraints. The variable a will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. A sketch of how such a compound string
+evaluates follows the list below.
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
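+
+As a sketch of how one of these compound strings evaluates; the probe
+versions are arbitrary:
+
+```go
+c, err := semver.NewConstraint(">= 1.2 < 3.0.0 || >= 4.2.3")
+if err != nil {
+	// Handle constraint not being parsable.
+}
+
+fmt.Println(c.Check(semver.MustParse("2.5.0"))) // true: >= 1.2 and < 3.0.0
+fmt.Println(c.Check(semver.MustParse("3.1.0"))) // false: fails both OR groups
+fmt.Println(c.Check(semver.MustParse("4.3.0"))) // true: >= 4.2.3
+```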
+
+### Working With Prerelease Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, pre-releases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification, pre-releases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer's comparisons using constraints without a pre-release comparator will skip
+pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order
+(see an [ASCII Table](http://www.asciitable.com/)).
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+The `Constraints` instance returned from `semver.NewConstraint()` has a property
+`IncludePrerelease` that, when set to true, will return prerelease versions when calls
+to `Check()` and `Validate()` are made. A sketch of this opt-in follows the
+Validation section below.
+
+### Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
+parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
+
+### Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+### Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+### Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+* `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+## Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+if err != nil {
+	// Handle constraint not being parseable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+	// Handle version not being parseable.
+}
+
+// Validate a version against a constraint.
+a, msgs := c.Validate(v)
+// a is false
+for _, m := range msgs {
+	fmt.Println(m)
+
+	// Loops over the errors which would read
+	// "1.3 is greater than 1.2.3"
+	// "1.3 is less than 1.4"
+}
+```
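+
+A minimal sketch of the `IncludePrerelease` opt-in described earlier, assuming
+the `MustParse` helper noted in the changelog; the versions are illustrative:
+
+```go
+// By default, constraint checks skip pre-release versions.
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+	// Handle constraint not being parsable.
+}
+
+v := semver.MustParse("1.3.0-beta.1")
+
+fmt.Println(c.Check(v)) // false: pre-releases are excluded by default
+
+// Opting in via IncludePrerelease makes the same check pass.
+c.IncludePrerelease = true
+fmt.Println(c.Check(v)) // true: 1.3.0-beta.1 satisfies >= 1.2.3
+```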
+
+## Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails, a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+if err != nil {
+    // Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parsable.
+}
+
+// Validate a version against a constraint.
+a, msgs := c.Validate(v)
+// a is false
+for _, m := range msgs {
+    fmt.Println(m)
+
+    // Loops over the errors which would read
+    // "1.3 is greater than 1.2.3"
+    // "1.3 is less than 1.4"
+}
+```
+
+## Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
+
+## Security
+
+Security is an important consideration for this project. The project currently
+uses the following tools to help discover security issues:
+
+* [CodeQL](https://codeql.github.com)
+* [gosec](https://github.com/securego/gosec)
+* Daily Fuzz testing
+
+If you believe you have found a security vulnerability you can privately disclose
+it through the [GitHub security page](https://github.com/Masterminds/semver/security).
diff --git a/hack/tools/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/hack/tools/vendor/github.com/Masterminds/semver/v3/SECURITY.md
new file mode 100644
index 0000000000..a30a66b1f7
--- /dev/null
+++ b/hack/tools/vendor/github.com/Masterminds/semver/v3/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+The following versions of semver are currently supported:
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 3.x     | :white_check_mark: |
+| 2.x     | :x:                |
+| 1.x     | :x:                |
+
+Fixes are only released for the latest minor version in the form of a patch release.
+
+## Reporting a Vulnerability
+
+You can privately disclose a vulnerability through GitHub's
+[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories)
+mechanism.
diff --git a/hack/tools/vendor/github.com/Masterminds/semver/v3/collection.go b/hack/tools/vendor/github.com/Masterminds/semver/v3/collection.go
new file mode 100644
index 0000000000..a78235895f
--- /dev/null
+++ b/hack/tools/vendor/github.com/Masterminds/semver/v3/collection.go
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection: the number of Version instances
+// in the slice.
+func (c Collection) Len() int {
+	return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects on the
+// slice. It checks if one is less than the other.
+func (c Collection) Less(i, j int) bool {
+	return c[i].LessThan(c[j])
+}
+
+// Swap is needed for the sort interface to swap the Version objects
+// at two different positions in the slice.
+func (c Collection) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
diff --git a/hack/tools/vendor/github.com/Masterminds/semver/v3/constraints.go b/hack/tools/vendor/github.com/Masterminds/semver/v3/constraints.go
new file mode 100644
index 0000000000..8b7a10f836
--- /dev/null
+++ b/hack/tools/vendor/github.com/Masterminds/semver/v3/constraints.go
@@ -0,0 +1,601 @@
+package semver
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// Constraints is one or more constraints that a semantic version can be
+// checked against.
+type Constraints struct {
+	constraints [][]*constraint
+	containsPre []bool
+
+	// IncludePrerelease specifies if pre-releases should be included in
+	// the results. Note, if a constraint range has a prerelease then
+	// prereleases will be included for that AND group even if this is
+	// set to false.
+	IncludePrerelease bool
+}
+
+// NewConstraint returns a Constraints instance that a Version instance can
+// be checked against. If there is a parse error it will be returned.
+func NewConstraint(c string) (*Constraints, error) {
+
+	// Rewrite - ranges into a comparison operation.
+	c = rewriteRange(c)
+
+	ors := strings.Split(c, "||")
+	lenors := len(ors)
+	or := make([][]*constraint, lenors)
+	hasPre := make([]bool, lenors)
+	for k, v := range ors {
+		// Validate the segment
+		if !validConstraintRegex.MatchString(v) {
+			return nil, fmt.Errorf("improper constraint: %s", v)
+		}
+
+		cs := findConstraintRegex.FindAllString(v, -1)
+		if cs == nil {
+			cs = append(cs, v)
+		}
+		result := make([]*constraint, len(cs))
+		for i, s := range cs {
+			pc, err := parseConstraint(s)
+			if err != nil {
+				return nil, err
+			}
+
+			// If one of the constraints has a prerelease, record this.
+			// This information is used when checking all in an "and"
+			// group to ensure they all check for prereleases.
+			if pc.con.pre != "" {
+				hasPre[k] = true
+			}
+
+			result[i] = pc
+		}
+		or[k] = result
+	}
+
+	o := &Constraints{
+		constraints: or,
+		containsPre: hasPre,
+	}
+	return o, nil
+}
+
+// Check tests if a version satisfies the constraints.
+func (cs Constraints) Check(v *Version) bool {
+	// TODO(mattfarina): For v4 of this library consolidate the Check and Validate
+	// functions as the underlying functions make that possible now.
+	// loop over the ORs and check the inner ANDs
+	for i, o := range cs.constraints {
+		joy := true
+		for _, c := range o {
+			if check, _ := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); !check {
+				joy = false
+				break
+			}
+		}
+
+		if joy {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Validate checks if a version satisfies a constraint. If not, a slice of
+// reasons for the failure is returned in addition to a bool.
+func (cs Constraints) Validate(v *Version) (bool, []error) {
+	// loop over the ORs and check the inner ANDs
+	var e []error
+
+	// Capture the prerelease message only once. The first time it happens
+	// this var is set.
+	var prerelease bool
+	for i, o := range cs.constraints {
+		joy := true
+		for _, c := range o {
+			// Before running the check handle the case where the version is
+			// a prerelease and the check is not searching for prereleases.
+			if !(cs.IncludePrerelease || cs.containsPre[i]) && v.pre != "" {
+				if !prerelease {
+					em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+					e = append(e, em)
+					prerelease = true
+				}
+				joy = false
+			} else {
+				if _, err := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); err != nil {
+					e = append(e, err)
+					joy = false
+				}
+			}
+		}
+
+		if joy {
+			return true, []error{}
+		}
+	}
+
+	return false, e
+}
+
+func (cs Constraints) String() string {
+	buf := make([]string, len(cs.constraints))
+	var tmp bytes.Buffer
+
+	for k, v := range cs.constraints {
+		tmp.Reset()
+		vlen := len(v)
+		for kk, c := range v {
+			tmp.WriteString(c.string())
+
+			// Space separate the AND conditions
+			if vlen > 1 && kk < vlen-1 {
+				tmp.WriteString(" ")
+			}
+		}
+		buf[k] = tmp.String()
+	}
+
+	return strings.Join(buf, " || ")
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (cs *Constraints) UnmarshalText(text []byte) error {
+	temp, err := NewConstraint(string(text))
+	if err != nil {
+		return err
+	}
+
+	*cs = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (cs Constraints) MarshalText() ([]byte, error) { + return []byte(cs.String()), nil +} + +var constraintOps map[string]cfunc +var constraintRegex *regexp.Regexp +var constraintRangeRegex *regexp.Regexp + +// Used to find individual constraints within a multi-constraint string +var findConstraintRegex *regexp.Regexp + +// Used to validate an segment of ANDs is valid +var validConstraintRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + ops, + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) + + findConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `(%s)\s*(%s)`, + ops, + cvRegex)) + + // The first time a constraint shows up will look slightly different from + // future times it shows up due to a leading space or comma in a given + // string. + validConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`, + ops, + cvRegex, + ops, + cvRegex)) +} + +// An individual constraint +type constraint struct { + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. + con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version, includePre bool) (bool, error) { + return constraintOps[c.origfunc](v, c, includePre) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint, includePre bool) (bool, error) + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = fmt.Sprintf("0.0.0%s", m[6]) + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. 
+ return nil, errors.New("constraint parser error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint parser error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + if c.con.Major() != v.Major() { + return true, nil + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true, nil + } else if c.minorDirty { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true, nil + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + } + + eq := v.Equal(c.con) + if eq { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + + return true, nil +} + +func constraintGreaterThan(v *Version, c *constraint, includePre bool) (bool, error) { + + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return true, nil + } else if v.Major() < c.con.Major() { + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.minorDirty { + // This is a range case such as >11. When the version is something like + // 11.1.0 is it not > 11. For that we would need 12 or higher + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.patchDirty { + // This is for ranges such as >11.1. A version of 11.1.1 is not greater + // which one of 11.2.1 is greater + eq = v.Minor() > c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + // If we have gotten here we are not comparing pre-preleases and can use the + // Compare function to accomplish that. + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) +} + +func constraintLessThan(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. 
+ // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) < 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) +} + +func constraintGreaterThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { + + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) >= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than %s", v, c.orig) +} + +func constraintLessThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
+ if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + return constraintTilde(v, c, includePre) + } + + eq := v.Equal(c.con) + if eq { + return true, nil + } + + return false, fmt.Errorf("%s is not equal to %s", v, c.orig) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + // This less than handles prereleases + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + var eq bool + + // ^ when the major > 0 is >=x.y.z < x+1 + if c.con.Major() > 0 || c.minorDirty { + + // ^ has to be within a major range for > 0. Everything less than was + // filtered out with the LessThan call above. This filters out those + // that greater but not within the same major range. + eq = v.Major() == c.con.Major() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 + if c.con.Major() == 0 && v.Major() > 0 { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + // If the con Minor is > 0 it is not dirty + if c.con.Minor() > 0 || c.patchDirty { + eq = v.Minor() == c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig) + } + // ^ when the minor is 0 and minor > 0 is =0.0.z + if c.con.Minor() == 0 && v.Minor() > 0 { + return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig) + } + + // At this point the major is 0 and the minor is 0 and not dirty. The patch + // is not dirty so we need to check if they are equal. If they are not equal + eq = c.con.Patch() == v.Patch() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig) +} + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/hack/tools/vendor/github.com/Masterminds/semver/v3/doc.go b/hack/tools/vendor/github.com/Masterminds/semver/v3/doc.go new file mode 100644 index 0000000000..74f97caa57 --- /dev/null +++ b/hack/tools/vendor/github.com/Masterminds/semver/v3/doc.go @@ -0,0 +1,184 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. 
+
+Specifically it provides the ability to:
+
+  - Parse semantic versions
+  - Sort semantic versions
+  - Check if a semantic version fits within a set of constraints
+  - Optionally work with a `v` prefix
+
+# Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error can be returned if there is an issue
+parsing the version. For example,
+
+	v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+# Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+	raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+	vs := make([]*semver.Version, len(raw))
+	for i, r := range raw {
+		v, err := semver.NewVersion(r)
+		if err != nil {
+			// Handle version not being parsable.
+		}
+
+		vs[i] = v
+	}
+
+	sort.Sort(semver.Collection(vs))
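+
+Once sorted, the collection is in ascending precedence order: 0.4.2, 1.0,
+1.2.3, 1.3, 2.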
+
+# Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other is using Constraints. There are some important
+differences to note between these two methods of comparison.
+
+ 1. When two versions are compared using functions such as `Compare`, `LessThan`,
+    and others it will follow the specification and always include prereleases
+    within the comparison. It will provide an answer that is valid with the
+    comparison section of the spec at https://semver.org/#spec-item-11
+ 2. When constraint checking is used for checks or validation it will follow a
+    different set of rules that are common for ranges with tools like npm/js
+    and Rust/Cargo. This includes considering prereleases to be invalid if the
+    range does not include one. If you want to have it include pre-releases a
+    simple solution is to include `-0` in your range.
+ 3. Constraint ranges can have some complex rules including the shorthand use of
+    ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+	c, err := semver.NewConstraint(">= 1.2.3")
+	if err != nil {
+		// Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parsable.
+	}
+	// Check if the version meets the constraints. The variable a will be true.
+	a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`.
+
+The basic comparisons are:
+
+  - `=`: equal (aliased to no operator)
+  - `!=`: not equal
+  - `>`: greater than
+  - `<`: less than
+  - `>=`: greater than or equal to
+  - `<=`: less than or equal to
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+  - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+  - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as wildcard characters. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+  - `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+  - `>= 1.2.x` is equivalent to `>= 1.2.0`
+  - `<= 2.x` is equivalent to `< 3`
+  - `*` is equivalent to `>= 0.0.0`
+
+# Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+  - `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+  - `~1` is equivalent to `>= 1, < 2`
+  - `~2.3` is equivalent to `>= 2.3, < 2.4`
+  - `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+  - `~1.x` is equivalent to `>= 1, < 2`
+
+# Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking. For example,
+
+  - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+  - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+  - `^2.3` is equivalent to `>= 2.3, < 3`
+  - `^2.x` is equivalent to `>= 2.0.0, < 3`
+  - `^0.2.3` is equivalent to `>= 0.2.3, < 0.3.0`
+  - `^0.2` is equivalent to `>= 0.2.0, < 0.3.0`
+  - `^0.0.3` is equivalent to `>= 0.0.3, < 0.0.4`
+  - `^0.0` is equivalent to `>= 0.0.0, < 0.1.0`
+  - `^0` is equivalent to `>= 0.0.0, < 1.0.0`
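+
+As a minimal sketch of the difference between the two operators (error handling
+elided):
+
+	v := semver.MustParse("1.4.0")
+
+	tilde, _ := semver.NewConstraint("~1.2.3") // >= 1.2.3, < 1.3.0
+	caret, _ := semver.NewConstraint("^1.2.3") // >= 1.2.3, < 2.0.0
+
+	a := tilde.Check(v) // a is false: 1.4.0 is outside the patch range
+	b := caret.Check(v) // b is true: 1.4.0 is inside the major range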
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails, a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+	c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+	if err != nil {
+		// Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parsable.
+	}
+
+	// Validate a version against a constraint.
+	a, msgs := c.Validate(v)
+	// a is false
+	for _, m := range msgs {
+		fmt.Println(m)
+
+		// Loops over the errors which would read
+		// "1.3 is greater than 1.2.3"
+		// "1.3 is less than 1.4"
+	}
+*/
+package semver
diff --git a/hack/tools/vendor/github.com/Masterminds/semver/v3/version.go b/hack/tools/vendor/github.com/Masterminds/semver/v3/version.go
new file mode 100644
index 0000000000..7a3ba73887
--- /dev/null
+++ b/hack/tools/vendor/github.com/Masterminds/semver/v3/version.go
@@ -0,0 +1,788 @@
+package semver
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+var looseVersionRegex *regexp.Regexp
+
+// CoerceNewVersion sets if leading 0's are allowed in the version part. Leading 0's are
+// not allowed in a valid semantic version. When set to true, NewVersion will coerce
+// leading 0's into a valid version.
+var CoerceNewVersion = true
+
+// DetailedNewVersionErrors specifies if detailed errors are returned from the NewVersion
+// function. This is used when CoerceNewVersion is set to false. If set to false
+// ErrInvalidSemVer is returned for an invalid version. This does not apply to
+// StrictNewVersion. Setting this to false returns errors more quickly.
+var DetailedNewVersionErrors = true
+
+var (
+	// ErrInvalidSemVer is returned when a version is found to be invalid
+	// while being parsed.
+	ErrInvalidSemVer = errors.New("invalid semantic version")
+
+	// ErrEmptyString is returned when an empty string is passed in for parsing.
+	ErrEmptyString = errors.New("version string empty")
+
+	// ErrInvalidCharacters is returned when invalid characters are found as
+	// part of a version.
+	ErrInvalidCharacters = errors.New("invalid characters in version")
+
+	// ErrSegmentStartsZero is returned when a version segment starts with 0.
+	// This is invalid in SemVer.
+	ErrSegmentStartsZero = errors.New("version segment starts with 0")
+
+	// ErrInvalidMetadata is returned when the metadata is an invalid format.
+	ErrInvalidMetadata = errors.New("invalid metadata string")
+
+	// ErrInvalidPrerelease is returned when the pre-release is an invalid format.
+	ErrInvalidPrerelease = errors.New("invalid prerelease string")
+)
+
+// semVerRegex is the regular expression used to parse a semantic version.
+// This is not the official regex from the semver spec. It has been modified to allow for loose handling
+// where versions like 2.1 are detected.
+const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
+	`(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
+	`(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
+
+// looseSemVerRegex is a regular expression that lets invalid semver expressions through
+// with enough detail that certain errors can be checked for.
+const looseSemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+// Version represents a single semantic version.
+type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") + looseVersionRegex = regexp.MustCompile("^" + looseSemVerRegex + "$") +} + +const ( + num string = "0123456789" + allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num +) + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. +// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x +// releases of semver did, use the NewVersion() function. +func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // Extract build metadata + if strings.Contains(parts[2], "+") { + extra := strings.SplitN(parts[2], "+", 2) + sv.metadata = extra[1] + parts[2] = extra[0] + if err := validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + // Extract build prerelease + if strings.Contains(parts[2], "-") { + extra := strings.SplitN(parts[2], "-", 2) + sv.pre = extra[1] + parts[2] = extra[0] + if err := validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. + for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract major, minor, and patch + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). +func NewVersion(v string) (*Version, error) { + if CoerceNewVersion { + return coerceNewVersion(v) + } + m := versionRegex.FindStringSubmatch(v) + if m == nil { + + // Disabling detailed errors is first so that it is in the fast path. + if !DetailedNewVersionErrors { + return nil, ErrInvalidSemVer + } + + // Check for specific errors with the semver string and return a more detailed + // error. 
+ m = looseVersionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + err := validateVersion(m) + if err != nil { + return nil, err + } + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[5], + pre: m[4], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(m[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(m[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +func coerceNewVersion(v string) (*Version, error) { + m := looseVersionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// New creates a new instance of Version with each of the parts passed in as +// arguments instead of parsing a version string. +func New(major, minor, patch uint64, pre, metadata string) *Version { + v := Version{ + major: major, + minor: minor, + patch: patch, + pre: pre, + metadata: metadata, + original: "", + } + + v.original = v.String() + + return &v +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. 
+func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hyphen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 { + if err := validatePrerelease(prerelease); err != nil { + return vNext, err + } + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. +func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 { + if err := validateMetadata(metadata); err != nil { + return vNext, err + } + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// LessThanEqual tests if one version is less or equal than another one. +func (v *Version) LessThanEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + +// GreaterThan tests if one version is greater than another one. 
+func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// GreaterThanEqual tests if one version is greater or equal than another one. +func (v *Version) GreaterThanEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + if v == o { + return true + } + if v == nil || o == nil { + return false + } + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. Compare always takes into account +// prereleases. If you want to work with ranges using typical range syntaxes that +// skip prereleases if the range is not looking for them use constraints. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(text []byte) error { + temp, err := NewVersion(string(text)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (v Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} + +// Scan implements the SQL.Scanner interface. +func (v *Version) Scan(value interface{}) error { + var s string + s, _ = value.(string) + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// Value implements the Driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} + +func compareSegment(v, o uint64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. 
+	slen := len(sparts)
+	olen := len(oparts)
+
+	l := slen
+	if olen > slen {
+		l = olen
+	}
+
+	// Iterate over each part of the prereleases to compare the differences.
+	for i := 0; i < l; i++ {
+		// Since the length of the parts can be different we need to create
+		// a placeholder. This is to avoid out of bounds issues.
+		stemp := ""
+		if i < slen {
+			stemp = sparts[i]
+		}
+
+		otemp := ""
+		if i < olen {
+			otemp = oparts[i]
+		}
+
+		d := comparePrePart(stemp, otemp)
+		if d != 0 {
+			return d
+		}
+	}
+
+	// Reaching here means the two prereleases are of equal precedence. They
+	// may not be identical in string form but the version comparison finds
+	// them to be equal.
+	return 0
+}
+
+func comparePrePart(s, o string) int {
+	// Fastpath if they are equal
+	if s == o {
+		return 0
+	}
+
+	// When s or o are empty we can use the other in an attempt to determine
+	// the response.
+	if s == "" {
+		if o != "" {
+			return -1
+		}
+		return 1
+	}
+
+	if o == "" {
+		if s != "" {
+			return 1
+		}
+		return -1
+	}
+
+	// When comparing strings "99" is greater than "103". To handle
+	// cases like this we need to detect numbers and compare them. According
+	// to the semver spec, numbers are always positive. If there is a - at the
+	// start like -99 this is to be evaluated as an alphanum. Per the spec,
+	// numeric identifiers always have lower precedence than alphanumeric
+	// ones. Parsing as Uints because negative numbers are ignored.
+
+	oi, n1 := strconv.ParseUint(o, 10, 64)
+	si, n2 := strconv.ParseUint(s, 10, 64)
+
+	// When both are strings compare them lexically
+	if n1 != nil && n2 != nil {
+		if s > o {
+			return 1
+		}
+		return -1
+	} else if n1 != nil {
+		// o is a string and s is a number
+		return -1
+	} else if n2 != nil {
+		// s is a string and o is a number
+		return 1
+	}
+	// Both are numbers
+	if si > oi {
+		return 1
+	}
+	return -1
+}
+
+// Like strings.ContainsAny but does an only instead of any.
+func containsOnly(s string, comp string) bool {
+	return strings.IndexFunc(s, func(r rune) bool {
+		return !strings.ContainsRune(comp, r)
+	}) == -1
+}
+
+// From the spec, "Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty.
+// Numeric identifiers MUST NOT include leading zeroes.". These segments can
+// be dot separated.
+func validatePrerelease(p string) error {
+	eparts := strings.Split(p, ".")
+	for _, p := range eparts {
+		if p == "" {
+			return ErrInvalidPrerelease
+		} else if containsOnly(p, num) {
+			if len(p) > 1 && p[0] == '0' {
+				return ErrSegmentStartsZero
+			}
+		} else if !containsOnly(p, allowed) {
+			return ErrInvalidPrerelease
+		}
+	}
+
+	return nil
+}
+
+// From the spec, "Build metadata MAY be denoted by
+// appending a plus sign and a series of dot separated identifiers immediately
+// following the patch or pre-release version. Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty."
+func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if p == "" { + return ErrInvalidMetadata + } else if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} + +// validateVersion checks for common validation issues but may not catch all errors +func validateVersion(m []string) error { + var err error + var v string + if m[1] != "" { + if len(m[1]) > 1 && m[1][0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[2] != "" { + v = strings.TrimPrefix(m[2], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[3] != "" { + v = strings.TrimPrefix(m[3], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[5] != "" { + if err = validatePrerelease(m[5]); err != nil { + return err + } + } + + if m[8] != "" { + if err = validateMetadata(m[8]); err != nil { + return err + } + } + + return nil +} diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md index 73fe513468..773af218e9 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md @@ -1,3 +1,72 @@ +## v2.9.0 (2025-11-17) + +* [GH-3508](https://github.com/gophercloud/gophercloud/pull/3508) [v2] Trigger "hold" workflow on merge groups +* [GH-3511](https://github.com/gophercloud/gophercloud/pull/3511) [v2] Closes #2321 - Fix TestRolesCRUD by including DomainID to TestRolesCRUD +* [GH-3513](https://github.com/gophercloud/gophercloud/pull/3513) [v2] build(deps): bump actions/labeler from 5 to 6 +* [GH-3516](https://github.com/gophercloud/gophercloud/pull/3516) [v2] refactor: Trivial fixes +* [GH-3524](https://github.com/gophercloud/gophercloud/pull/3524) [v2] [glance]: Add 'uploading' status +* [GH-3525](https://github.com/gophercloud/gophercloud/pull/3525) [v2] compute: Add host aggregate uuid field +* [GH-3526](https://github.com/gophercloud/gophercloud/pull/3526) [v2] Enable deletion for network and loadbalancer quotas +* [GH-3541](https://github.com/gophercloud/gophercloud/pull/3541) [v2] docs: Document tested releases for acceptance tests +* [GH-3544](https://github.com/gophercloud/gophercloud/pull/3544) [v2] Identity V3: Add Options field to roles. 
+* [GH-3547](https://github.com/gophercloud/gophercloud/pull/3547) [v2] Add config_drive to server struct +* [GH-3548](https://github.com/gophercloud/gophercloud/pull/3548) [v2] Identity: Add description field to roles +* [GH-3549](https://github.com/gophercloud/gophercloud/pull/3549) [v2] compute: add cpu info topology cells entry +* [GH-3550](https://github.com/gophercloud/gophercloud/pull/3550) [v2] Migrate epoxy jobs to Ubuntu 24.04 (Noble), drop caracal jobs +* [GH-3551](https://github.com/gophercloud/gophercloud/pull/3551) [v2] build(deps): bump github/codeql-action from 3 to 4 +* [GH-3557](https://github.com/gophercloud/gophercloud/pull/3557) [v2] Fix EC2 authentication to work with new Keystone auth requirement +* [GH-3558](https://github.com/gophercloud/gophercloud/pull/3558) [v2] identity/services: add omitempty to the `type` field +* [GH-3559](https://github.com/gophercloud/gophercloud/pull/3559) [v2] fix: handle Nova create image response for microversion 2.45 and above + +## v2.8.0 (2025-08-18) + +* [GH-3348](https://github.com/gophercloud/gophercloud/pull/3348) [v2] [networking] add ExtractRoutersInto func helper to routers +* [GH-3354](https://github.com/gophercloud/gophercloud/pull/3354) [v2] Fix a small typo +* [GH-3358](https://github.com/gophercloud/gophercloud/pull/3358) [v2] tests: fix devstack master branch tests +* [GH-3361](https://github.com/gophercloud/gophercloud/pull/3361) [v2] octavia: fix http_version type to float +* [GH-3362](https://github.com/gophercloud/gophercloud/pull/3362) [v2] tests: fix containerinfra template creation +* [GH-3367](https://github.com/gophercloud/gophercloud/pull/3367) [v2] Use Makefile for CI jobs +* [GH-3375](https://github.com/gophercloud/gophercloud/pull/3375) [v2] core: add missing Builder interfaces +* [GH-3378](https://github.com/gophercloud/gophercloud/pull/3378) [v2] tests: fix failing rabbitmq service +* [GH-3379](https://github.com/gophercloud/gophercloud/pull/3379) [v2] CI: Remove Bobcat +* [GH-3384](https://github.com/gophercloud/gophercloud/pull/3384) [v2] Move master CI jobs to Ubuntu 24.04 +* [GH-3386](https://github.com/gophercloud/gophercloud/pull/3386) [v2] tests: Fix TestBGPAgentCRUD +* [GH-3387](https://github.com/gophercloud/gophercloud/pull/3387) [v2] Update the doc of openstack.AuthOptionsFromEnv function +* [GH-3389](https://github.com/gophercloud/gophercloud/pull/3389) [v2] networking: add constants for statuses +* [GH-3391](https://github.com/gophercloud/gophercloud/pull/3391) [v2] CI: Add Epoxy +* [GH-3393](https://github.com/gophercloud/gophercloud/pull/3393) [v2] dns: implement shared zones list +* [GH-3394](https://github.com/gophercloud/gophercloud/pull/3394) [v2] acceptance: Prevent 409 when bulk-creating secgroup rules +* [GH-3396](https://github.com/gophercloud/gophercloud/pull/3396) [v2] identity: add support for string boolean in users' enabled member +* [GH-3397](https://github.com/gophercloud/gophercloud/pull/3397) [v2] Adjust List func to accept a Builder in tenants, routers and security groups packages +* [GH-3399](https://github.com/gophercloud/gophercloud/pull/3399) [v2] blockstorage: add manage-existing and unmanage api call +* [GH-3401](https://github.com/gophercloud/gophercloud/pull/3401) [v2] Added address groups to Networking extensions, with tests. 
+* [GH-3407](https://github.com/gophercloud/gophercloud/pull/3407) [v2] neutron: add segment_id support to subnets +* [GH-3413](https://github.com/gophercloud/gophercloud/pull/3413) [v2] build(deps): bump joelanford/go-apidiff from 0.8.2 to 0.8.3 +* [GH-3416](https://github.com/gophercloud/gophercloud/pull/3416) [v2] tests: bump devstack-action +* [GH-3422](https://github.com/gophercloud/gophercloud/pull/3422) [v2] Fix documentation for gateway_ip in subnet update +* [GH-3431](https://github.com/gophercloud/gophercloud/pull/3431) [v2] Use container-infra for OpenStack-API-Version +* [GH-3433](https://github.com/gophercloud/gophercloud/pull/3433) [v2] make: Use fixed version of gotestsum +* [GH-3434](https://github.com/gophercloud/gophercloud/pull/3434) [v2] Randomize test order for unit tests +* [GH-3435](https://github.com/gophercloud/gophercloud/pull/3435) [v2] Add versioned endpoint discovery +* [GH-3438](https://github.com/gophercloud/gophercloud/pull/3438) [v2] dns: add support for /v2/quotas +* [GH-3439](https://github.com/gophercloud/gophercloud/pull/3439) [v2] neutron: add segments extension package +* [GH-3446](https://github.com/gophercloud/gophercloud/pull/3446) nova: add support for hostname updates +* [GH-3452](https://github.com/gophercloud/gophercloud/pull/3452) [v2] neutron: allow omission of subnet_id for IP address +* [GH-3454](https://github.com/gophercloud/gophercloud/pull/3454) [v2] blockstorage: add isPublic query option for volume types +* [GH-3458](https://github.com/gophercloud/gophercloud/pull/3458) [v2] Fix pagination for messaging client +* [GH-3465](https://github.com/gophercloud/gophercloud/pull/3465) [v2] tests: Fix TestVLANTransparentCRUD test +* [GH-3466](https://github.com/gophercloud/gophercloud/pull/3466) [v2] tests: fix tests for recent PR backports +* [GH-3469](https://github.com/gophercloud/gophercloud/pull/3469) [v2] tests: shorten GH-A job names +* [GH-3473](https://github.com/gophercloud/gophercloud/pull/3473) [v2] core: clone service type aliases instead of referencing global slice +* [GH-3475](https://github.com/gophercloud/gophercloud/pull/3475) [v2] Implement update & delete traits on resource provider +* [GH-3476](https://github.com/gophercloud/gophercloud/pull/3476) [v2] tests: fix volumetypes unit tests +* [GH-3477](https://github.com/gophercloud/gophercloud/pull/3477) [v2] script: Improve getenvvar helper +* [GH-3481](https://github.com/gophercloud/gophercloud/pull/3481) [v2] Implement hypervisors.GetExt: Get with Query parameter +* [GH-3487](https://github.com/gophercloud/gophercloud/pull/3487) [v2] Add networking taas tapmirror suite +* [GH-3489](https://github.com/gophercloud/gophercloud/pull/3489) [v2] Fix incorrect ICMP field description in PortRangeMax comment +* [GH-3494](https://github.com/gophercloud/gophercloud/pull/3494) [v2] Networking v2: Support two time formats for subnet, router, SG rule (#3492) +* [GH-3495](https://github.com/gophercloud/gophercloud/pull/3495) [v2] build(deps): bump actions/checkout from 4 to 5 + ## v2.7.0 (2025-04-03) * [GH-3306](https://github.com/gophercloud/gophercloud/pull/3306) [v2] identity: Add Get endpoint by ID diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/Makefile b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/Makefile index 2a0618a6b6..c63adb8d03 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/Makefile +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/Makefile @@ -1,7 +1,9 @@ undefine GOFLAGS GOLANGCI_LINT_VERSION?=v1.62.2 
-GO_TEST?=go run gotest.tools/gotestsum@latest --format testname -- +GOTESTSUM_VERSION?=v1.12.2 +GO_TEST?=go run gotest.tools/gotestsum@$(GOTESTSUM_VERSION) --format testname -- +TIMEOUT := "60m" ifeq ($(shell command -v podman 2> /dev/null),) RUNNER=docker @@ -9,15 +11,18 @@ else RUNNER=podman endif -# if the golangci-lint steps fails with the following error message: +# if the golangci-lint steps fails with one of the following error messages: # # directory prefix . does not contain main module or its selected dependencies # +# failed to initialize build cache at /root/.cache/golangci-lint: mkdir /root/.cache/golangci-lint: permission denied +# # you probably have to fix the SELinux security context for root directory plus your cache # # chcon -Rt svirt_sandbox_file_t . # chcon -Rt svirt_sandbox_file_t ~/.cache/golangci-lint lint: + mkdir -p ~/.cache/golangci-lint/$(GOLANGCI_LINT_VERSION) $(RUNNER) run -t --rm \ -v $(shell pwd):/app \ -v ~/.cache/golangci-lint/$(GOLANGCI_LINT_VERSION):/root/.cache \ @@ -31,84 +36,88 @@ format: .PHONY: format unit: - $(GO_TEST) ./... + $(GO_TEST) -shuffle on ./... .PHONY: unit coverage: - $(GO_TEST) -covermode count -coverprofile cover.out -coverpkg=./... ./... + $(GO_TEST) -shuffle on -covermode count -coverprofile cover.out -coverpkg=./... ./... .PHONY: coverage -acceptance: acceptance-baremetal acceptance-blockstorage acceptance-compute acceptance-container acceptance-containerinfra acceptance-db acceptance-dns acceptance-identity acceptance-imageservice acceptance-keymanager acceptance-loadbalancer acceptance-messaging acceptance-networking acceptance-objectstorage acceptance-orchestration acceptance-placement acceptance-sharedfilesystems acceptance-workflow +acceptance: acceptance-basic acceptance-baremetal acceptance-blockstorage acceptance-compute acceptance-container acceptance-containerinfra acceptance-db acceptance-dns acceptance-identity acceptance-image acceptance-keymanager acceptance-loadbalancer acceptance-messaging acceptance-networking acceptance-objectstorage acceptance-orchestration acceptance-placement acceptance-sharedfilesystems acceptance-workflow .PHONY: acceptance +acceptance-basic: + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack +.PHONY: acceptance-basic + acceptance-baremetal: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/baremetal/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/baremetal/... .PHONY: acceptance-baremetal acceptance-blockstorage: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/blockstorage/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/blockstorage/... .PHONY: acceptance-blockstorage acceptance-compute: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/compute/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/compute/... .PHONY: acceptance-compute acceptance-container: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/container/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/container/... .PHONY: acceptance-container acceptance-containerinfra: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/containerinfra/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/containerinfra/... 
.PHONY: acceptance-containerinfra acceptance-db: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/db/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/db/... .PHONY: acceptance-db acceptance-dns: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/dns/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/dns/... .PHONY: acceptance-dns acceptance-identity: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/identity/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/identity/... .PHONY: acceptance-identity acceptance-image: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/imageservice/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/image/... .PHONY: acceptance-image acceptance-keymanager: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/keymanager/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/keymanager/... .PHONY: acceptance-keymanager acceptance-loadbalancer: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/loadbalancer/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/loadbalancer/... .PHONY: acceptance-loadbalancer acceptance-messaging: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/messaging/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/messaging/... .PHONY: acceptance-messaging acceptance-networking: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/networking/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/networking/... .PHONY: acceptance-networking acceptance-objectstorage: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/objectstorage/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/objectstorage/... .PHONY: acceptance-objectstorage acceptance-orchestration: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/orchestration/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/orchestration/... .PHONY: acceptance-orchestration acceptance-placement: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/placement/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/placement/... .PHONY: acceptance-placement acceptance-sharedfilesystems: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/sharedfilesystems/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/sharedfilesystems/... .PHONY: acceptance-sharefilesystems acceptance-workflow: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/workflow/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/workflow/... 
 .PHONY: acceptance-workflow
diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go
index 8818e769b8..34d76a1b8d 100644
--- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go
+++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go
@@ -79,6 +79,11 @@ type EndpointOpts struct {
 	// Required only for services that span multiple regions.
 	Region string
 
+	// Version [optional] is the major version of the service required. It is not
+	// a microversion. Use this to ensure the correct endpoint is selected when
+	// multiple API versions are available.
+	Version int
+
 	// Availability [optional] is the visibility of the endpoint to be returned.
 	// Valid types include the constants AvailabilityPublic, AvailabilityInternal,
 	// or AvailabilityAdmin from this package.
@@ -111,7 +116,7 @@ func (eo *EndpointOpts) ApplyDefaults(t string) {
 	if len(eo.Aliases) == 0 {
 		if aliases, ok := ServiceTypeAliases[eo.Type]; ok {
 			// happy path: user requested a service type by its official name
-			eo.Aliases = aliases
+			eo.Aliases = slices.Clone(aliases)
 		} else {
 			// unhappy path: user requested a service type by its alias or an
 			// invalid/unsupported service type
@@ -121,7 +126,7 @@ func (eo *EndpointOpts) ApplyDefaults(t string) {
 			// we intentionally override the service type, even if it
 			// was explicitly requested by the user
 			eo.Type = t
-			eo.Aliases = aliases
+			eo.Aliases = slices.Clone(aliases)
 		}
 	}
 }
diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/auth_env.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/auth_env.go
index 893787b787..9ecc5b4efe 100644
--- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/auth_env.go
+++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/auth_env.go
@@ -24,8 +24,8 @@ OS_PROJECT_NAME and the latter are expected against a v3 auth api.
 If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will still be referred
 as "tenant" in Gophercloud.
 
-If OS_PROJECT_NAME is set, it requires OS_PROJECT_ID to be set as well to
-handle projects not on the default domain.
+If OS_PROJECT_NAME is set, it requires OS_DOMAIN_ID or OS_DOMAIN_NAME to be
+set as well to handle projects not on the default domain.
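
The new `Version` field above plugs into the `EndpointLocator` that authentication installs on the provider client, so a catalog lookup can be pinned to a major API version. A minimal sketch of a direct lookup, assuming an already-authenticated `*gophercloud.ProviderClient` named `provider` (the region and availability values are illustrative):

```go
// Ask the catalog for a block-storage endpoint that actually serves v3.
// Version is a major API version, not a microversion.
url, err := provider.EndpointLocator(gophercloud.EndpointOpts{
	Type:         "block-storage",
	Region:       "RegionOne",
	Availability: gophercloud.AvailabilityPublic,
	Version:      3,
})
if err != nil {
	panic(err)
}
fmt.Println(url)
```

Most callers never set `Version` directly: the `New${SERVICE}V${VERSION}` constructors in client.go, further down, now pin it automatically.
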
To use this function, first set the OS_* environment variables (for example, by sourcing an `openrc` file), then: diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/doc.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/doc.go index 2ab4af93ee..e018b57a8d 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/doc.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/doc.go @@ -157,5 +157,12 @@ Example of Attaching a Volume to an Instance if err != nil { panic(err) } + +Example of Unmanaging a Volume + + err := volumes.Unmanage(context.TODO(), client, volume.ID).ExtractErr() + if err != nil { + panic(err) + } */ package volumes diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/requests.go index 77210943b5..1026d1ecaa 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/requests.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/requests.go @@ -623,6 +623,12 @@ func SetImageMetadata(ctx context.Context, client *gophercloud.ServiceClient, id return } +// BootableOptsBuilder allows extensions to add additional parameters to the +// SetBootable request. +type BootableOptsBuilder interface { + ToBootableMap() (map[string]any, error) +} + // BootableOpts contains options for setting bootable status to a volume. type BootableOpts struct { // Enables or disables the bootable attribute. You can boot an instance from a bootable volume. @@ -636,7 +642,7 @@ func (opts BootableOpts) ToBootableMap() (map[string]any, error) { } // SetBootable will set bootable status on a volume based on the values in BootableOpts -func SetBootable(ctx context.Context, client *gophercloud.ServiceClient, id string, opts BootableOpts) (r SetBootableResult) { +func SetBootable(ctx context.Context, client *gophercloud.ServiceClient, id string, opts BootableOptsBuilder) (r SetBootableResult) { b, err := opts.ToBootableMap() if err != nil { r.Err = err @@ -697,6 +703,12 @@ func ChangeType(ctx context.Context, client *gophercloud.ServiceClient, id strin return } +// ReImageOptsBuilder allows extensions to add additional parameters to the +// ReImage request. +type ReImageOptsBuilder interface { + ToReImageMap() (map[string]any, error) +} + // ReImageOpts contains options for Re-image a volume. type ReImageOpts struct { // New image id @@ -711,7 +723,7 @@ func (opts ReImageOpts) ToReImageMap() (map[string]any, error) { } // ReImage will re-image a volume based on the values in ReImageOpts -func ReImage(ctx context.Context, client *gophercloud.ServiceClient, id string, opts ReImageOpts) (r ReImageResult) { +func ReImage(ctx context.Context, client *gophercloud.ServiceClient, id string, opts ReImageOptsBuilder) (r ReImageResult) { b, err := opts.ToReImageMap() if err != nil { r.Err = err @@ -763,3 +775,14 @@ func ResetStatus(ctx context.Context, client *gophercloud.ServiceClient, id stri _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) return } + +// Unmanage removes a volume from Block Storage management without +// removing the back-end storage object that is associated with it. 
+func Unmanage(ctx context.Context, client *gophercloud.ServiceClient, id string) (r UnmanageResult) {
+	body := map[string]any{"os-unmanage": make(map[string]any)}
+	resp, err := client.Post(ctx, actionURL(client, id), body, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
+	return
+}
diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/results.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/results.go
index 3f184b398e..e99ef5e197 100644
--- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/results.go
+++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/results.go
@@ -399,3 +399,8 @@ type ReImageResult struct {
 type ResetStatusResult struct {
 	gophercloud.ErrResult
 }
+
+// UnmanageResult contains the response error from an Unmanage request.
+type UnmanageResult struct {
+	gophercloud.ErrResult
+}
diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go
index 122a3ee699..73ca5c56d5 100644
--- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go
+++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go
@@ -2,6 +2,7 @@ package openstack
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"reflect"
 	"strings"
@@ -162,7 +163,7 @@ func v2auth(ctx context.Context, client *gophercloud.ProviderClient, endpoint st
 		}
 	}
 	client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) {
-		return V2EndpointURL(catalog, opts)
+		return V2Endpoint(context.TODO(), client, catalog, opts)
 	}
 
 	return nil
@@ -283,7 +284,7 @@ func v3auth(ctx context.Context, client *gophercloud.ProviderClient, endpoint st
 		}
 	}
 	client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) {
-		return V3EndpointURL(catalog, opts)
+		return V3Endpoint(context.TODO(), client, catalog, opts)
 	}
 
 	return nil
@@ -345,13 +346,20 @@ func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOp
 }
 
 // TODO(stephenfin): Allow passing aliases to all New${SERVICE}V${VERSION} methods in v3
-func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) {
+func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string, version int) (*gophercloud.ServiceClient, error) {
 	sc := new(gophercloud.ServiceClient)
+
 	eo.ApplyDefaults(clientType)
+	if eo.Version != 0 && eo.Version != version {
+		return sc, errors.New("Conflict between requested service major version and manually set version")
+	}
+	eo.Version = version
+
 	url, err := client.EndpointLocator(eo)
 	if err != nil {
 		return sc, err
 	}
+
 	sc.ProviderClient = client
 	sc.Endpoint = url
 	sc.Type = clientType
@@ -361,7 +369,7 @@ func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointO
 
 // NewBareMetalV1 creates a ServiceClient that may be used with the v1
 // bare metal package.
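
With `initClientOpts` now threading a major version through to endpoint discovery, an explicitly set `EndpointOpts.Version` must agree with the constructor being called; a mismatch fails fast instead of silently returning the wrong endpoint. A short sketch of both paths, again assuming an authenticated `provider` (region name illustrative):

```go
// Conflicting request: the v3 constructor pins Version to 3.
_, err := openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{
	Region:  "RegionOne",
	Version: 2,
})
// err: "Conflict between requested service major version and manually set version"

// Leaving Version at its zero value lets the constructor fill it in.
client, err := openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{
	Region: "RegionOne",
})
if err != nil {
	panic(err)
}
_ = client
```
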
func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "baremetal") + sc, err := initClientOpts(client, eo, "baremetal", 1) if !strings.HasSuffix(strings.TrimSuffix(sc.Endpoint, "/"), "v1") { sc.ResourceBase = sc.Endpoint + "v1/" } @@ -371,25 +379,25 @@ func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointO // NewBareMetalIntrospectionV1 creates a ServiceClient that may be used with the v1 // bare metal introspection package. func NewBareMetalIntrospectionV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "baremetal-introspection") + return initClientOpts(client, eo, "baremetal-introspection", 1) } // NewObjectStorageV1 creates a ServiceClient that may be used with the v1 // object storage package. func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "object-store") + return initClientOpts(client, eo, "object-store", 1) } // NewComputeV2 creates a ServiceClient that may be used with the v2 compute // package. func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "compute") + return initClientOpts(client, eo, "compute", 2) } // NewNetworkV2 creates a ServiceClient that may be used with the v2 network // package. func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "network") + sc, err := initClientOpts(client, eo, "network", 2) sc.ResourceBase = sc.Endpoint + "v2.0/" return sc, err } @@ -398,40 +406,40 @@ func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpt // NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 // block storage service. func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volume") + return initClientOpts(client, eo, "volume", 1) } // NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 // block storage service. func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "block-storage") + return initClientOpts(client, eo, "block-storage", 2) } // NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "block-storage") + return initClientOpts(client, eo, "block-storage", 3) } // NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "shared-file-system") + return initClientOpts(client, eo, "shared-file-system", 2) } // NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 // orchestration service. 
func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "orchestration") + return initClientOpts(client, eo, "orchestration", 1) } // NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "database") + return initClientOpts(client, eo, "database", 1) } // NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS // service. func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "dns") + sc, err := initClientOpts(client, eo, "dns", 2) sc.ResourceBase = sc.Endpoint + "v2/" return sc, err } @@ -439,7 +447,7 @@ func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) ( // NewImageV2 creates a ServiceClient that may be used to access the v2 image // service. func NewImageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "image") + sc, err := initClientOpts(client, eo, "image", 2) sc.ResourceBase = sc.Endpoint + "v2/" return sc, err } @@ -447,7 +455,7 @@ func NewImageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) // NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2 // load balancer service. func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "load-balancer") + sc, err := initClientOpts(client, eo, "load-balancer", 2) // Fixes edge case having an OpenStack lb endpoint with trailing version number. endpoint := strings.Replace(sc.Endpoint, "v2.0/", "", -1) @@ -459,20 +467,20 @@ func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.Endpoi // NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging // service. func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "message") + sc, err := initClientOpts(client, eo, "message", 2) sc.MoreHeaders = map[string]string{"Client-ID": clientID} return sc, err } // NewContainerV1 creates a ServiceClient that may be used with v1 container package func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "application-container") + return initClientOpts(client, eo, "application-container", 1) } // NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key // manager service. func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "key-manager") + sc, err := initClientOpts(client, eo, "key-manager", 1) sc.ResourceBase = sc.Endpoint + "v1/" return sc, err } @@ -480,15 +488,15 @@ func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.Endpoint // NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management // package. 
 func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "container-infrastructure-management")
+	return initClientOpts(client, eo, "container-infrastructure-management", 1)
 }
 
 // NewWorkflowV2 creates a ServiceClient that may be used with the v2 workflow management package.
 func NewWorkflowV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "workflow")
+	return initClientOpts(client, eo, "workflow", 2)
 }
 
 // NewPlacementV1 creates a ServiceClient that may be used with the placement package.
 func NewPlacementV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "placement")
+	return initClientOpts(client, eo, "placement", 1)
 }
diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go
index 44e8cccaeb..c0ccebfa49 100644
--- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go
+++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go
@@ -1,10 +1,12 @@
 package servers
 
 import (
+	"bytes"
 	"context"
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
+	"io"
 	"maps"
 	"net"
 	"regexp"
@@ -651,6 +653,12 @@ type UpdateOpts struct {
 
 	// AccessIPv6 provides a new IPv6 address for the instance.
 	AccessIPv6 string `json:"accessIPv6,omitempty"`
+
+	// Hostname changes the hostname of the server.
+	// Requires microversion 2.90 or later.
+	// Note: This information is published via the metadata service and requires
+	// an application such as cloud-init to propagate it through to the instance.
+	Hostname *string `json:"hostname,omitempty"`
 }
 
 // ToServerUpdateMap formats an UpdateOpts structure into a request body.
@@ -1044,10 +1052,35 @@ func CreateImage(ctx context.Context, client *gophercloud.ServiceClient, id stri
 		r.Err = err
 		return
 	}
+
 	resp, err := client.Post(ctx, actionURL(client, id), b, nil, &gophercloud.RequestOpts{
-		OkCodes: []int{202},
+		OkCodes:          []int{202},
+		KeepResponseBody: true,
 	})
+
 	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
+	if r.Err != nil {
+		return
+	}
+	defer resp.Body.Close()
+
+	if v := r.Header.Get("Content-Type"); v != "application/json" {
+		return
+	}
+
+	// The response body is expected to be a small JSON object containing only "image_id".
+	// Read it fully into memory so the response body can be closed immediately.
+	// If the caller doesn't read from the buffer, it can still be safely garbage collected.
+ + var buf bytes.Buffer + + _, r.Err = io.Copy(&buf, resp.Body) + if r.Err != nil { + return + } + + r.Body = &buf + return } diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go index 385001c8dd..edc2740f68 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go @@ -7,9 +7,11 @@ import ( "fmt" "net/url" "path" + "strconv" "time" "github.com/gophercloud/gophercloud/v2" + "github.com/gophercloud/gophercloud/v2/openstack/utils" "github.com/gophercloud/gophercloud/v2/pagination" ) @@ -132,18 +134,49 @@ func (r CreateImageResult) ExtractImageID() (string, error) { if r.Err != nil { return "", r.Err } - // Get the image id from the header + + microversion := r.Header.Get("X-OpenStack-Nova-API-Version") + + major, minor, err := utils.ParseMicroversion(microversion) + if err != nil { + return "", fmt.Errorf("failed to parse X-OpenStack-Nova-API-Version header: %s", err) + } + + // In microversions prior to 2.45, the image ID was provided in the Location header. + if major < 2 || (major == 2 && minor < 45) { + return r.extractImageIDFromLocationHeader() + } + + // Starting from 2.45, it is included in the response body. + return r.extractImageIDFromResponseBody() +} + +func (r CreateImageResult) extractImageIDFromLocationHeader() (string, error) { u, err := url.ParseRequestURI(r.Header.Get("Location")) if err != nil { return "", err } + imageID := path.Base(u.Path) if imageID == "." || imageID == "/" { return "", fmt.Errorf("Failed to parse the ID of newly created image: %s", u) } + return imageID, nil } +func (r CreateImageResult) extractImageIDFromResponseBody() (string, error) { + var response struct { + ImageID string `json:"image_id"` + } + + if err := r.ExtractInto(&response); err != nil { + return "", err + } + + return response.ImageID, nil +} + // Server represents a server/instance in the OpenStack cloud. type Server struct { // ID uniquely identifies this server amongst all other servers, @@ -283,6 +316,9 @@ type Server struct { // Locked indicates the lock status of the server // This requires microversion 2.9 or later Locked *bool `json:"locked"` + + // ConfigDrive enables metadata injection through a configuration drive. 
+	ConfigDrive bool `json:"-"`
 }
 
 type AttachedVolume struct {
@@ -343,6 +379,7 @@ func (r *Server) UnmarshalJSON(b []byte) error {
 		Image        any                             `json:"image"`
 		LaunchedAt   gophercloud.JSONRFC3339MilliNoZ `json:"OS-SRV-USG:launched_at"`
 		TerminatedAt gophercloud.JSONRFC3339MilliNoZ `json:"OS-SRV-USG:terminated_at"`
+		ConfigDrive  any                             `json:"config_drive"`
 	}
 	err := json.Unmarshal(b, &s)
 	if err != nil {
@@ -364,6 +401,24 @@ func (r *Server) UnmarshalJSON(b []byte) error {
 	r.LaunchedAt = time.Time(s.LaunchedAt)
 	r.TerminatedAt = time.Time(s.TerminatedAt)
 
+	switch t := s.ConfigDrive.(type) {
+	case nil:
+		r.ConfigDrive = false
+	case bool:
+		r.ConfigDrive = t
+	case string:
+		if t == "" {
+			r.ConfigDrive = false
+		} else {
+			r.ConfigDrive, err = strconv.ParseBool(t)
+			if err != nil {
+				return fmt.Errorf("failed to parse ConfigDrive %q: %v", t, err)
+			}
+		}
+	default:
+		return fmt.Errorf("unknown type for ConfigDrive: %T (value: %v)", t, t)
+	}
+
 	return err
 }
diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint.go
new file mode 100644
index 0000000000..6178434423
--- /dev/null
+++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint.go
@@ -0,0 +1,190 @@
+package openstack
+
+import (
+	"context"
+	"regexp"
+	"slices"
+	"strconv"
+
+	"github.com/gophercloud/gophercloud/v2"
+	tokens2 "github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tokens"
+	tokens3 "github.com/gophercloud/gophercloud/v2/openstack/identity/v3/tokens"
+	"github.com/gophercloud/gophercloud/v2/openstack/utils"
+)
+
+var versionedServiceTypeAliasRegexp = regexp.MustCompile(`^.*v(\d)$`)
+
+func extractServiceTypeVersion(serviceType string) int {
+	matches := versionedServiceTypeAliasRegexp.FindAllStringSubmatch(serviceType, 1)
+	if matches != nil {
+		// the capture group is a single digit, so this conversion should not fail
+		ret, err := strconv.Atoi(matches[0][1])
+		if err != nil {
+			return 0
+		}
+		return ret
+	}
+	return 0
+}
+
+func endpointSupportsVersion(ctx context.Context, client *gophercloud.ProviderClient, serviceType, endpointURL string, expectedVersion int) (bool, error) {
+	// Swift doesn't support version discovery :(
+	if expectedVersion == 0 || serviceType == "object-store" {
+		return true, nil
+	}
+
+	// Repeating verbatim from keystoneauth1 [1]:
+	//
+	// > The sins of our fathers become the blood on our hands.
+	// > If a user requests an old-style service type such as volumev2, then they
+	// > are inherently requesting the major API version 2. It's not a good
+	// > interface, but it's the one that was imposed on the world years ago
+	// > because the client libraries all hid the version discovery document.
+	// > In order to be able to ensure that a user who requests volumev2 does not
+	// > get a block-storage endpoint that only provides v3 of the block-storage
+	// > service, we need to pull the version out of the service_type. The
+	// > service-types-authority will prevent the growth of new monstrosities such
+	// > as this, but in order to move forward without breaking people, we have
+	// > to just cry in the corner while striking ourselves with thorned branches.
+	// > That said, for sure only do this hack for officially known service_types.
+	//
+	// So yeah, what mordred said.
+ // + // https://github.com/openstack/keystoneauth/blob/5.10.0/keystoneauth1/discover.py#L270-L290 + impliedVersion := extractServiceTypeVersion(serviceType) + if impliedVersion != 0 && impliedVersion != expectedVersion { + return false, nil + } + + // NOTE(stephenfin) In addition to the above, keystoneauth also supports a URL + // hack whereby it will extract the version from the URL. We may wish to + // implement this too. + + endpointURL, err := utils.BaseVersionedEndpoint(endpointURL) + if err != nil { + return false, err + } + + supportedVersions, err := utils.GetServiceVersions(ctx, client, endpointURL, false) + if err != nil { + return false, err + } + + for _, supportedVersion := range supportedVersions { + if supportedVersion.Major == expectedVersion { + return true, nil + } + } + + return false, nil +} + +/* +V2Endpoint discovers the endpoint URL for a specific service from a +ServiceCatalog acquired during the v2 identity service. + +The specified EndpointOpts are used to identify a unique, unambiguous endpoint +to return. It's an error both when multiple endpoints match the provided +criteria and when none do. The minimum that can be specified is a Type, but you +will also often need to specify a Name and/or a Region depending on what's +available on your OpenStack deployment. +*/ +func V2Endpoint(ctx context.Context, client *gophercloud.ProviderClient, catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. + // + // If multiple endpoints are found, we return the first result and disregard the rest. + // This behavior matches the Python library. See GH-1764. + for _, entry := range catalog.Entries { + if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Region != "" && endpoint.Region != opts.Region { + continue + } + + var endpointURL string + switch opts.Availability { + case gophercloud.AvailabilityPublic: + endpointURL = gophercloud.NormalizeURL(endpoint.PublicURL) + case gophercloud.AvailabilityInternal: + endpointURL = gophercloud.NormalizeURL(endpoint.InternalURL) + case gophercloud.AvailabilityAdmin: + endpointURL = gophercloud.NormalizeURL(endpoint.AdminURL) + default: + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + + endpointSupportsVersion, err := endpointSupportsVersion(ctx, client, entry.Type, endpointURL, opts.Version) + if err != nil { + return "", err + } + if !endpointSupportsVersion { + continue + } + + return endpointURL, nil + } + } + } + + // Report an error if there were no matching endpoints. + err := &gophercloud.ErrEndpointNotFound{} + return "", err +} + +/* +V3Endpoint discovers the endpoint URL for a specific service from a Catalog +acquired during the v3 identity service. + +The specified EndpointOpts are used to identify a unique, unambiguous endpoint +to return. It's an error both when multiple endpoints match the provided +criteria and when none do. The minimum that can be specified is a Type, but you +will also often need to specify a Name and/or a Region depending on what's +available on your OpenStack deployment. 
+*/ +func V3Endpoint(ctx context.Context, client *gophercloud.ProviderClient, catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + if opts.Availability != gophercloud.AvailabilityAdmin && + opts.Availability != gophercloud.AvailabilityPublic && + opts.Availability != gophercloud.AvailabilityInternal { + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + + // Extract Endpoints from the catalog entries that match the requested Type, Interface, + // Name if provided, and Region if provided. + // + // If multiple endpoints are found, we return the first result and disregard the rest. + // This behavior matches the Python library. See GH-1764. + for _, entry := range catalog.Entries { + if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Availability != gophercloud.Availability(endpoint.Interface) { + continue + } + if opts.Region != "" && endpoint.Region != opts.Region && endpoint.RegionID != opts.Region { + continue + } + + endpointURL := gophercloud.NormalizeURL(endpoint.URL) + + endpointSupportsVersion, err := endpointSupportsVersion(ctx, client, entry.Type, endpointURL, opts.Version) + if err != nil { + return "", err + } + if !endpointSupportsVersion { + continue + } + + return endpointURL, nil + } + } + } + + // Report an error if there were no matching endpoints. + err := &gophercloud.ErrEndpointNotFound{} + return "", err +} diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go index 14cff0d755..573c1f06f4 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go @@ -8,6 +8,8 @@ import ( tokens3 "github.com/gophercloud/gophercloud/v2/openstack/identity/v3/tokens" ) +// TODO(stephenfin): Remove this module in v3. The functions below are no longer used. + /* V2EndpointURL discovers the endpoint URL for a specific service from a ServiceCatalog acquired during the v2 identity service. @@ -20,39 +22,33 @@ available on your OpenStack deployment. */ func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. - var endpoints = make([]tokens2.Endpoint, 0, 1) + // + // If multiple endpoints are found, we return the first result and disregard the rest. + // This behavior matches the Python library. See GH-1764. for _, entry := range catalog.Entries { if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) { for _, endpoint := range entry.Endpoints { - if opts.Region == "" || endpoint.Region == opts.Region { - endpoints = append(endpoints, endpoint) + if opts.Region != "" && endpoint.Region != opts.Region { + continue } - } - } - } - // If multiple endpoints were found, use the first result - // and disregard the other endpoints. - // - // This behavior matches the Python library. See GH-1764. 
- if len(endpoints) > 1 { - endpoints = endpoints[0:1] - } + var endpointURL string + switch opts.Availability { + case gophercloud.AvailabilityPublic: + endpointURL = gophercloud.NormalizeURL(endpoint.PublicURL) + case gophercloud.AvailabilityInternal: + endpointURL = gophercloud.NormalizeURL(endpoint.InternalURL) + case gophercloud.AvailabilityAdmin: + endpointURL = gophercloud.NormalizeURL(endpoint.AdminURL) + default: + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } - // Extract the appropriate URL from the matching Endpoint. - for _, endpoint := range endpoints { - switch opts.Availability { - case gophercloud.AvailabilityPublic: - return gophercloud.NormalizeURL(endpoint.PublicURL), nil - case gophercloud.AvailabilityInternal: - return gophercloud.NormalizeURL(endpoint.InternalURL), nil - case gophercloud.AvailabilityAdmin: - return gophercloud.NormalizeURL(endpoint.AdminURL), nil - default: - err := &ErrInvalidAvailabilityProvided{} - err.Argument = "Availability" - err.Value = opts.Availability - return "", err + return endpointURL, nil + } } } @@ -72,41 +68,35 @@ will also often need to specify a Name and/or a Region depending on what's available on your OpenStack deployment. */ func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + if opts.Availability != gophercloud.AvailabilityAdmin && + opts.Availability != gophercloud.AvailabilityPublic && + opts.Availability != gophercloud.AvailabilityInternal { + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + // Extract Endpoints from the catalog entries that match the requested Type, Interface, // Name if provided, and Region if provided. - var endpoints = make([]tokens3.Endpoint, 0, 1) + // + // If multiple endpoints are found, we return the first result and disregard the rest. + // This behavior matches the Python library. See GH-1764. for _, entry := range catalog.Entries { if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) { for _, endpoint := range entry.Endpoints { - if opts.Availability != gophercloud.AvailabilityAdmin && - opts.Availability != gophercloud.AvailabilityPublic && - opts.Availability != gophercloud.AvailabilityInternal { - err := &ErrInvalidAvailabilityProvided{} - err.Argument = "Availability" - err.Value = opts.Availability - return "", err + if opts.Availability != gophercloud.Availability(endpoint.Interface) { + continue } - if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && - (opts.Region == "" || endpoint.Region == opts.Region || endpoint.RegionID == opts.Region) { - endpoints = append(endpoints, endpoint) + if opts.Region != "" && endpoint.Region != opts.Region && endpoint.RegionID != opts.Region { + continue } + + return gophercloud.NormalizeURL(endpoint.URL), nil } } } - // If multiple endpoints were found, use the first result - // and disregard the other endpoints. - // - // This behavior matches the Python library. See GH-1764. - if len(endpoints) > 1 { - endpoints = endpoints[0:1] - } - - // Extract the URL from the matching Endpoint. - for _, endpoint := range endpoints { - return gophercloud.NormalizeURL(endpoint.URL), nil - } - // Report an error if there were no matching endpoints. 
err := &gophercloud.ErrEndpointNotFound{} return "", err diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants/requests.go index a08980df2c..84a8b9df1d 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants/requests.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants/requests.go @@ -7,6 +7,12 @@ import ( "github.com/gophercloud/gophercloud/v2/pagination" ) +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToTenantListQuery() (string, error) +} + // ListOpts filters the Tenants that are returned by the List call. type ListOpts struct { // Marker is the ID of the last Tenant on the previous page. @@ -16,15 +22,21 @@ type ListOpts struct { Limit int `q:"limit"` } +// ToTenantListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToTenantListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + // List enumerates the Tenants to which the current token has access. -func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager { +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { url := listURL(client) if opts != nil { - q, err := gophercloud.BuildQueryString(opts) + query, err := opts.ToTenantListQuery() if err != nil { return pagination.Pager{Err: err} } - url += q.String() + url += query } return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { return TenantPage{pagination.LinkedPageBase{PageResult: r}} diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/ec2tokens/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/ec2tokens/requests.go index 5b1f3d6882..1d4cb54928 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/ec2tokens/requests.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/ec2tokens/requests.go @@ -300,8 +300,7 @@ func Create(ctx context.Context, c *gophercloud.ServiceClient, opts tokens.AuthO deleteBodyElements(b, "token") resp, err := c.Post(ctx, ec2tokensURL(c), b, &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: map[string]string{"X-Auth-Token": ""}, - OkCodes: []int{200}, + OkCodes: []int{200}, }) _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) return @@ -320,8 +319,7 @@ func ValidateS3Token(ctx context.Context, c *gophercloud.ServiceClient, opts tok deleteBodyElements(b, "body_hash", "headers", "host", "params", "path", "verb") resp, err := c.Post(ctx, s3tokensURL(c), b, &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: map[string]string{"X-Auth-Token": ""}, - OkCodes: []int{200}, + OkCodes: []int{200}, }) _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) return diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/oauth1/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/oauth1/requests.go index 8c66b36e20..0b23269ffa 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/oauth1/requests.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/oauth1/requests.go @@ -214,6 +214,12 @@ func GetConsumer(ctx 
context.Context, client *gophercloud.ServiceClient, id stri return } +// UpdateConsumerOptsBuilder allows extensions to add additional parameters to the +// UpdateConsumer request. +type UpdateConsumerOptsBuilder interface { + ToOAuth1UpdateConsumerMap() (map[string]any, error) +} + // UpdateConsumerOpts provides options used to update a consumer. type UpdateConsumerOpts struct { // Description is the consumer description. @@ -227,7 +233,7 @@ func (opts UpdateConsumerOpts) ToOAuth1UpdateConsumerMap() (map[string]any, erro } // UpdateConsumer updates an existing Consumer. -func UpdateConsumer(ctx context.Context, client *gophercloud.ServiceClient, id string, opts UpdateConsumerOpts) (r UpdateConsumerResult) { +func UpdateConsumer(ctx context.Context, client *gophercloud.ServiceClient, id string, opts UpdateConsumerOptsBuilder) (r UpdateConsumerResult) { b, err := opts.ToOAuth1UpdateConsumerMap() if err != nil { r.Err = err diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/image/v2/images/types.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/image/v2/images/types.go index 147be19927..eedc13a330 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/image/v2/images/types.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/image/v2/images/types.go @@ -13,10 +13,14 @@ const ( // been reserved for an image in the image registry. ImageStatusQueued ImageStatus = "queued" - // ImageStatusSaving denotes that an image’s raw data is currently being + // ImageStatusSaving denotes that an image's raw data is currently being // uploaded to Glance ImageStatusSaving ImageStatus = "saving" + // ImageStatusUploading denotes that an image's raw data is currently being + // uploaded to Glance through the upload process + ImageStatusUploading ImageStatus = "uploading" + // ImageStatusActive denotes an image that is fully available in Glance. ImageStatusActive ImageStatus = "active" diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/flavors/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/flavors/requests.go index 710a6edf5b..67196a5202 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/flavors/requests.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/flavors/requests.go @@ -127,7 +127,7 @@ func (opts UpdateOpts) ToFlavorUpdateMap() (map[string]any, error) { // Update is an operation which modifies the attributes of the specified // Flavor. 
-func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { +func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { b, err := opts.ToFlavorUpdateMap() if err != nil { r.Err = err diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/l7policies/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/l7policies/requests.go index 62a4f179ee..ab0b22c6bc 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/l7policies/requests.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/l7policies/requests.go @@ -263,6 +263,12 @@ func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts U return } +// CreateRuleOptsBuilder allows extensions to add additional parameters to the +// CreateRule request. +type CreateRuleOptsBuilder interface { + ToRuleCreateMap() (map[string]any, error) +} + // CreateRuleOpts is the common options struct used in this package's CreateRule // operation. type CreateRuleOpts struct { @@ -300,7 +306,7 @@ func (opts CreateRuleOpts) ToRuleCreateMap() (map[string]any, error) { } // CreateRule will create and associate a Rule with a particular L7Policy. -func CreateRule(ctx context.Context, c *gophercloud.ServiceClient, policyID string, opts CreateRuleOpts) (r CreateRuleResult) { +func CreateRule(ctx context.Context, c *gophercloud.ServiceClient, policyID string, opts CreateRuleOptsBuilder) (r CreateRuleResult) { b, err := opts.ToRuleCreateMap() if err != nil { r.Err = err diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners/requests.go index 3216fbddd0..abd5d08970 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners/requests.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners/requests.go @@ -380,7 +380,7 @@ func (opts UpdateOpts) ToListenerUpdateMap() (map[string]any, error) { // Update is an operation which modifies the attributes of the specified // Listener. -func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { +func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { b, err := opts.ToListenerUpdateMap() if err != nil { r.Err = err diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers/requests.go index f815806f39..095170edd3 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers/requests.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers/requests.go @@ -208,7 +208,7 @@ func (opts UpdateOpts) ToLoadBalancerUpdateMap() (map[string]any, error) { // Update is an operation which modifies the attributes of the specified // LoadBalancer. 
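
Relaxing `Update` from the concrete `UpdateOpts` to the `UpdateOptsBuilder` interface (here and in the flavors and listeners changes above) is backwards compatible, since `UpdateOpts` already implements the interface, and it lets callers decorate the request body. A hypothetical wrapper; the `vendor_hint` key is purely illustrative and not part of the Octavia API:

```go
// customOpts reuses the stock request map and injects one extra key.
type customOpts struct {
	loadbalancers.UpdateOpts
}

func (o customOpts) ToLoadBalancerUpdateMap() (map[string]any, error) {
	b, err := o.UpdateOpts.ToLoadBalancerUpdateMap()
	if err != nil {
		return nil, err
	}
	// The stock builder wraps the body in a "loadbalancer" object.
	b["loadbalancer"].(map[string]any)["vendor_hint"] = "example" // illustrative only
	return b, nil
}
```

`loadbalancers.Update(ctx, c, id, customOpts{...})` then works with no change on the caller's side.
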
-func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { +func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { b, err := opts.ToLoadBalancerUpdateMap() if err != nil { r.Err = err diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/requests.go index be5701c5f4..15a503badc 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/requests.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/requests.go @@ -2,6 +2,7 @@ package monitors import ( "context" + "strconv" "github.com/gophercloud/gophercloud/v2" "github.com/gophercloud/gophercloud/v2/pagination" @@ -153,7 +154,25 @@ type CreateOpts struct { // ToMonitorCreateMap builds a request body from CreateOpts. func (opts CreateOpts) ToMonitorCreateMap() (map[string]any, error) { - return gophercloud.BuildRequestBody(opts, "healthmonitor") + b, err := gophercloud.BuildRequestBody(opts, "healthmonitor") + if err != nil { + return nil, err + } + + if v, ok := b["healthmonitor"]; ok { + if m, ok := v.(map[string]any); ok { + if v, ok := m["http_version"]; ok { + if v, ok := v.(string); ok { + m["http_version"], err = strconv.ParseFloat(v, 64) + if err != nil { + return nil, err + } + } + } + } + } + + return b, nil } /* @@ -247,7 +266,25 @@ type UpdateOpts struct { // ToMonitorUpdateMap builds a request body from UpdateOpts. func (opts UpdateOpts) ToMonitorUpdateMap() (map[string]any, error) { - return gophercloud.BuildRequestBody(opts, "healthmonitor") + b, err := gophercloud.BuildRequestBody(opts, "healthmonitor") + if err != nil { + return nil, err + } + + if v, ok := b["healthmonitor"]; ok { + if m, ok := v.(map[string]any); ok { + if v, ok := m["http_version"]; ok { + if v, ok := v.(string); ok { + m["http_version"], err = strconv.ParseFloat(v, 64) + if err != nil { + return nil, err + } + } + } + } + } + + return b, nil } // Update is an operation which modifies the attributes of the specified diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/results.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/results.go index 644ef18700..6e8563faaa 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/results.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/results.go @@ -1,6 +1,9 @@ package monitors import ( + "encoding/json" + "strconv" + "github.com/gophercloud/gophercloud/v2" "github.com/gophercloud/gophercloud/v2/pagination" ) @@ -61,7 +64,7 @@ type Monitor struct { HTTPMethod string `json:"http_method"` // The HTTP version that the monitor uses for requests. - HTTPVersion string `json:"http_version"` + HTTPVersion string `json:"-"` // The HTTP path of the request sent by the monitor to test the health of a // member. Must be a string beginning with a forward slash (/). 
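
Octavia expects `http_version` as a JSON number while the Go structs keep it as a string, hence the `strconv.ParseFloat` conversion in the two builders above and the custom unmarshalling below. A creation sketch, assuming `lbClient` is a load balancer v2 ServiceClient and `poolID` is known:

```go
monitor, err := monitors.Create(context.TODO(), lbClient, monitors.CreateOpts{
	PoolID:      poolID,
	Type:        monitors.TypeHTTP,
	Delay:       20,
	Timeout:     10,
	MaxRetries:  5,
	URLPath:     "/healthz",
	HTTPVersion: "1.1", // sent on the wire as the number 1.1
}).Extract()
if err != nil {
	panic(err)
}
fmt.Println(monitor.HTTPVersion) // formatted back to "1.1" when read
```
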
@@ -96,6 +99,26 @@ type Monitor struct { Tags []string `json:"tags"` } +func (r *Monitor) UnmarshalJSON(b []byte) error { + type tmp Monitor + var s struct { + tmp + HTTPVersion float64 `json:"http_version"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Monitor(s.tmp) + if s.HTTPVersion != 0 { + r.HTTPVersion = strconv.FormatFloat(s.HTTPVersion, 'f', 1, 64) + } + + return nil +} + // MonitorPage is the page returned by a pager when traversing over a // collection of health monitors. type MonitorPage struct { diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/constants.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/constants.go new file mode 100644 index 0000000000..85dff7818c --- /dev/null +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/constants.go @@ -0,0 +1,7 @@ +package floatingips + +const ( + StatusActive = "ACTIVE" + StatusDown = "DOWN" + StatusError = "ERROR" +) diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/requests.go index f6ca654841..def4699db3 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/requests.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/requests.go @@ -8,6 +8,12 @@ import ( "github.com/gophercloud/gophercloud/v2/pagination" ) +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToRouterListQuery() (string, error) +} + // ListOpts allows the filtering and sorting of paginated collections through // the API. Filtering is achieved by passing in struct field values that map to // the floating IP attributes you want to see returned. SortKey allows you to @@ -33,19 +39,31 @@ type ListOpts struct { RevisionNumber *int `q:"revision_number"` } +// ToRouterListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToRouterListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return "", err + } + return q.String(), nil +} + // List returns a Pager which allows you to iterate over a collection of // routers. It accepts a ListOpts struct, which allows you to filter and sort // the returned collection for greater efficiency. // // Default policy settings return only those routers that are owned by the // tenant who submits the request, unless an admin user submits the request. 
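
The router `List` call below moves to the same `ListOptsBuilder` pattern used elsewhere in this update; passing a `ListOpts` literal, or `nil` for no filtering, keeps working. A listing sketch, assuming `networkClient` is a networking v2 ServiceClient:

```go
allPages, err := routers.List(networkClient, routers.ListOpts{}).AllPages(context.TODO())
if err != nil {
	panic(err)
}

allRouters, err := routers.ExtractRouters(allPages)
if err != nil {
	panic(err)
}
for _, router := range allRouters {
	fmt.Printf("%s (%s)\n", router.Name, router.Status)
}
```
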
-func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToRouterListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { return RouterPage{pagination.LinkedPageBase{PageResult: r}} }) } diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/results.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/results.go index d75615b773..d657160ba2 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/results.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/results.go @@ -21,7 +21,7 @@ type GatewayInfo struct { // router. type ExternalFixedIP struct { IPAddress string `json:"ip_address,omitempty"` - SubnetID string `json:"subnet_id"` + SubnetID string `json:"subnet_id,omitempty"` } // Route is a possible route in a router. @@ -82,10 +82,48 @@ type Router struct { RevisionNumber int `json:"revision_number"` // Timestamp when the router was created - CreatedAt time.Time `json:"created_at"` + CreatedAt time.Time `json:"-"` // Timestamp when the router was last updated - UpdatedAt time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"-"` +} + +func (r *Router) UnmarshalJSON(b []byte) error { + type tmp Router + + // Support for older neutron time format + var s1 struct { + tmp + CreatedAt gophercloud.JSONRFC3339NoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339NoZ `json:"updated_at"` + } + + err := json.Unmarshal(b, &s1) + if err == nil { + *r = Router(s1.tmp) + r.CreatedAt = time.Time(s1.CreatedAt) + r.UpdatedAt = time.Time(s1.UpdatedAt) + + return nil + } + + // Support for newer neutron time format + var s2 struct { + tmp + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + } + + err = json.Unmarshal(b, &s2) + if err != nil { + return err + } + + *r = Router(s2.tmp) + r.CreatedAt = time.Time(s2.CreatedAt) + r.UpdatedAt = time.Time(s2.UpdatedAt) + + return nil } // RouterPage is the page returned by a pager when traversing over a @@ -122,11 +160,14 @@ func (r RouterPage) IsEmpty() (bool, error) { // and extracts the elements into a slice of Router structs. In other words, // a generic collection is mapped into a relevant slice. func ExtractRouters(r pagination.Page) ([]Router, error) { - var s struct { - Routers []Router `json:"routers"` - } - err := (r.(RouterPage)).ExtractInto(&s) - return s.Routers, err + var s []Router + err := ExtractRoutersInto(r, &s) + return s, err +} + +// ExtractRoutersInto extracts the elements into a slice of Router structs. 
+func ExtractRoutersInto(r pagination.Page, v any) error { + return r.(RouterPage).Result.ExtractIntoSlicePtr(v, "routers") } type commonResult struct { diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/requests.go index 77768a3dac..edd253f037 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/requests.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/requests.go @@ -7,41 +7,60 @@ import ( "github.com/gophercloud/gophercloud/v2/pagination" ) +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToSecGroupListQuery() (string, error) +} + // ListOpts allows the filtering and sorting of paginated collections through // the API. Filtering is achieved by passing in struct field values that map to // the security group rule attributes you want to see returned. SortKey allows // you to sort by a particular network attribute. SortDir sets the direction, // and is either `asc' or `desc'. Marker and Limit are used for pagination. type ListOpts struct { - Direction string `q:"direction"` - EtherType string `q:"ethertype"` - ID string `q:"id"` - Description string `q:"description"` - PortRangeMax int `q:"port_range_max"` - PortRangeMin int `q:"port_range_min"` - Protocol string `q:"protocol"` - RemoteGroupID string `q:"remote_group_id"` - RemoteIPPrefix string `q:"remote_ip_prefix"` - SecGroupID string `q:"security_group_id"` - TenantID string `q:"tenant_id"` - ProjectID string `q:"project_id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` - RevisionNumber *int `q:"revision_number"` + Direction string `q:"direction"` + EtherType string `q:"ethertype"` + ID string `q:"id"` + Description string `q:"description"` + PortRangeMax int `q:"port_range_max"` + PortRangeMin int `q:"port_range_min"` + Protocol string `q:"protocol"` + RemoteAddressGroupID string `q:"remote_address_group_id"` + RemoteGroupID string `q:"remote_group_id"` + RemoteIPPrefix string `q:"remote_ip_prefix"` + SecGroupID string `q:"security_group_id"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` + RevisionNumber *int `q:"revision_number"` +} + +// ToSecGroupListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSecGroupListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return "", err + } + return q.String(), nil } // List returns a Pager which allows you to iterate over a collection of // security group rules. It accepts a ListOpts struct, which allows you to filter // and sort the returned collection for greater efficiency. 
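
Security group rule listing gets the same builder treatment below, and `ListOpts` grows a `RemoteAddressGroupID` filter. A filtered listing sketch, assuming `networkClient` and a known security group ID in `groupID`:

```go
listOpts := rules.ListOpts{
	SecGroupID: groupID,
	Direction:  "ingress",
	Protocol:   "tcp",
}

allPages, err := rules.List(networkClient, listOpts).AllPages(context.TODO())
if err != nil {
	panic(err)
}

allRules, err := rules.ExtractRules(allPages)
if err != nil {
	panic(err)
}
```
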
-func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
-	q, err := gophercloud.BuildQueryString(&opts)
-	if err != nil {
-		return pagination.Pager{Err: err}
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := rootURL(c)
+	if opts != nil {
+		query, err := opts.ToSecGroupListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
 	}
-	u := rootURL(c) + q.String()
-	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
 		return SecGroupRulePage{pagination.LinkedPageBase{PageResult: r}}
 	})
 }
@@ -106,7 +125,7 @@ type CreateOpts struct {

 	// The maximum port number in the range that is matched by the security group
 	// rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If
-	// the protocol is ICMP, this value must be an ICMP type.
+	// the protocol is ICMP, this value must be an ICMP code.
 	PortRangeMax int `json:"port_range_max,omitempty"`

 	// The minimum port number in the range that is matched by the security group
@@ -119,12 +138,16 @@ type CreateOpts struct {
 	// "tcp", "udp", "icmp" or an empty string.
 	Protocol RuleProtocol `json:"protocol,omitempty"`

+	// The remote address group ID to be associated with this security group rule.
+	// You can specify either RemoteAddressGroupID, RemoteGroupID, or RemoteIPPrefix.
+	RemoteAddressGroupID string `json:"remote_address_group_id,omitempty"`
+
 	// The remote group ID to be associated with this security group rule. You can
-	// specify either RemoteGroupID or RemoteIPPrefix.
+	// specify either RemoteAddressGroupID, RemoteGroupID, or RemoteIPPrefix.
 	RemoteGroupID string `json:"remote_group_id,omitempty"`

 	// The remote IP prefix to be associated with this security group rule. You can
-	// specify either RemoteGroupID or RemoteIPPrefix. This attribute matches the
+	// specify either RemoteAddressGroupID, RemoteGroupID, or RemoteIPPrefix. This attribute matches the
 	// specified IP prefix as the source IP address of the IP packet.
 	RemoteIPPrefix string `json:"remote_ip_prefix,omitempty"`

diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/results.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/results.go
index 8a3355dfe0..03696ac203 100644
--- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/results.go
+++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/results.go
@@ -1,6 +1,7 @@
 package rules

 import (
+	"encoding/json"
 	"time"

 	"github.com/gophercloud/gophercloud/v2"
@@ -44,6 +45,10 @@ type SecGroupRule struct {
 	// "tcp", "udp", "icmp" or an empty string.
 	Protocol string

+	// The remote address group ID to be associated with this security group rule.
+	// You can specify either RemoteAddressGroupID, RemoteGroupID, or RemoteIPPrefix.
+	RemoteAddressGroupID string `json:"remote_address_group_id"`
+
 	// The remote group ID to be associated with this security group rule. You
 	// can specify either RemoteGroupID or RemoteIPPrefix.
RemoteGroupID string `json:"remote_group_id"` @@ -63,10 +68,48 @@ type SecGroupRule struct { RevisionNumber int `json:"revision_number"` // Timestamp when the rule was created - CreatedAt time.Time `json:"created_at"` + CreatedAt time.Time `json:"-"` // Timestamp when the rule was last updated - UpdatedAt time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"-"` +} + +func (r *SecGroupRule) UnmarshalJSON(b []byte) error { + type tmp SecGroupRule + + // Support for older neutron time format + var s1 struct { + tmp + CreatedAt gophercloud.JSONRFC3339NoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339NoZ `json:"updated_at"` + } + + err := json.Unmarshal(b, &s1) + if err == nil { + *r = SecGroupRule(s1.tmp) + r.CreatedAt = time.Time(s1.CreatedAt) + r.UpdatedAt = time.Time(s1.UpdatedAt) + + return nil + } + + // Support for newer neutron time format + var s2 struct { + tmp + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + } + + err = json.Unmarshal(b, &s2) + if err != nil { + return err + } + + *r = SecGroupRule(s2.tmp) + r.CreatedAt = time.Time(s2.CreatedAt) + r.UpdatedAt = time.Time(s2.UpdatedAt) + + return nil } // SecGroupRulePage is the page returned by a pager when traversing over a diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks/constants.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks/constants.go new file mode 100644 index 0000000000..6bec77fa79 --- /dev/null +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks/constants.go @@ -0,0 +1,9 @@ +package trunks + +const ( + StatusActive = "ACTIVE" + StatusBuild = "BUILD" + StatusDegraded = "DEGRADED" + StatusDown = "DOWN" + StatusError = "ERROR" +) diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks/constants.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks/constants.go new file mode 100644 index 0000000000..1214ce9deb --- /dev/null +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks/constants.go @@ -0,0 +1,8 @@ +package networks + +const ( + StatusActive = "ACTIVE" + StatusBuild = "BUILD" + StatusDown = "DOWN" + StatusError = "ERROR" +) diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/constants.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/constants.go new file mode 100644 index 0000000000..6275839bf4 --- /dev/null +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/constants.go @@ -0,0 +1,8 @@ +package ports + +const ( + StatusActive = "ACTIVE" + StatusBuild = "BUILD" + StatusDown = "DOWN" + StatusError = "ERROR" +) diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go index 74a0fa3b49..db223d48c1 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go @@ -49,7 +49,7 @@ type DeleteResult struct { // IP is a sub-struct that represents an individual IP. 
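The new status constants in the trunks, networks, and ports packages let callers drop bare string literals when polling resources. A trivial sketch (the helper name is invented, and p is assumed to be a ports.Port fetched elsewhere):

package example

import (
	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports"
)

// portIsReady compares against ports.StatusActive instead of the literal "ACTIVE".
func portIsReady(p ports.Port) bool {
	return p.Status == ports.StatusActive
}

networks.StatusActive and trunks.StatusActive (plus the trunk-only StatusDegraded) follow the same pattern.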
type IP struct { - SubnetID string `json:"subnet_id"` + SubnetID string `json:"subnet_id,omitempty"` IPAddress string `json:"ip_address,omitempty"` } diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/requests.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/requests.go index 150afd7394..85c5d2b402 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/requests.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/requests.go @@ -43,6 +43,7 @@ type ListOpts struct { NotTags string `q:"not-tags"` NotTagsAny string `q:"not-tags-any"` RevisionNumber *int `q:"revision_number"` + SegmentID string `q:"segment_id"` } // ToSubnetListQuery formats a ListOpts into a query string. @@ -147,6 +148,10 @@ type CreateOpts struct { // Prefixlen is used when user creates a subnet from the subnetpool. It will // overwrite the "default_prefixlen" value of the referenced subnetpool. Prefixlen int `json:"prefixlen,omitempty"` + + // SegmentID is a network segment the subnet is associated with. It is + // available when segment extension is enabled. + SegmentID string `json:"segment_id,omitempty"` } // ToSubnetCreateMap builds a request body from CreateOpts. @@ -194,9 +199,8 @@ type UpdateOpts struct { // AllocationPools are IP Address pools that will be available for DHCP. AllocationPools []AllocationPool `json:"allocation_pools,omitempty"` - // GatewayIP sets gateway information for the subnet. Setting to nil will - // cause a default gateway to automatically be created. Setting to an empty - // string will cause the subnet to be created with no gateway. Setting to + // GatewayIP sets gateway information for the subnet. Setting to an empty + // string will cause the subnet to not have a gateway. Setting to // an explicit address will set that address as the gateway. GatewayIP *string `json:"gateway_ip,omitempty"` @@ -219,6 +223,10 @@ type UpdateOpts struct { // will set revision_number=%s. If the revision number does not match, the // update will fail. RevisionNumber *int `json:"-" h:"If-Match"` + + // SegmentID is a network segment the subnet is associated with. It is + // available when segment extension is enabled. + SegmentID *string `json:"segment_id,omitempty"` } // ToSubnetUpdateMap builds a request body from UpdateOpts. diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/results.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/results.go index 01c6acc070..4f0aa8408d 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/results.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/results.go @@ -1,6 +1,7 @@ package subnets import ( + "encoding/json" "time" "github.com/gophercloud/gophercloud/v2" @@ -124,11 +125,53 @@ type Subnet struct { // RevisionNumber optionally set via extensions/standard-attr-revisions RevisionNumber int `json:"revision_number"` + // SegmentID of a network segment the subnet is associated with. It is + // available when segment extension is enabled. 
+ SegmentID string `json:"segment_id"` + // Timestamp when the subnet was created - CreatedAt time.Time `json:"created_at"` + CreatedAt time.Time `json:"-"` // Timestamp when the subnet was last updated - UpdatedAt time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"-"` +} + +func (r *Subnet) UnmarshalJSON(b []byte) error { + type tmp Subnet + + // Support for older neutron time format + var s1 struct { + tmp + CreatedAt gophercloud.JSONRFC3339NoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339NoZ `json:"updated_at"` + } + + err := json.Unmarshal(b, &s1) + if err == nil { + *r = Subnet(s1.tmp) + r.CreatedAt = time.Time(s1.CreatedAt) + r.UpdatedAt = time.Time(s1.UpdatedAt) + + return nil + } + + // Support for newer neutron time format + var s2 struct { + tmp + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + } + + err = json.Unmarshal(b, &s2) + if err != nil { + return err + } + + *r = Subnet(s2.tmp) + r.CreatedAt = time.Time(s2.CreatedAt) + r.UpdatedAt = time.Time(s2.UpdatedAt) + + return nil } // SubnetPage is the page returned by a pager when traversing over a collection diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/base_endpoint.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/base_endpoint.go index 40080f7af2..f219c0bf4d 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/base_endpoint.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/base_endpoint.go @@ -6,9 +6,7 @@ import ( "strings" ) -// BaseEndpoint will return a URL without the /vX.Y -// portion of the URL. -func BaseEndpoint(endpoint string) (string, error) { +func parseEndpoint(endpoint string, includeVersion bool) (string, error) { u, err := url.Parse(endpoint) if err != nil { return "", err @@ -21,8 +19,23 @@ func BaseEndpoint(endpoint string) (string, error) { if version := versionRe.FindString(path); version != "" { versionIndex := strings.Index(path, version) + if includeVersion { + versionIndex += len(version) + } u.Path = path[:versionIndex] } return u.String(), nil } + +// BaseEndpoint will return a URL without the /vX.Y +// portion of the URL. +func BaseEndpoint(endpoint string) (string, error) { + return parseEndpoint(endpoint, false) +} + +// BaseVersionedEndpoint will return a URL with the /vX.Y portion of the URL, +// if present, but without a project ID or similar +func BaseVersionedEndpoint(endpoint string) (string, error) { + return parseEndpoint(endpoint, true) +} diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go index 6c720e57ef..ccc56345a6 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go @@ -3,7 +3,6 @@ package utils import ( "context" "fmt" - "strconv" "strings" "github.com/gophercloud/gophercloud/v2" @@ -29,6 +28,7 @@ var goodStatus = map[string]bool{ // It returns the highest-Priority Version, OR exact match with client endpoint, // among the alternatives that are provided, as well as its corresponding endpoint. func ChooseVersion(ctx context.Context, client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { + // TODO(stephenfin): This could be removed since we can accomplish this with GetServiceVersions now. 
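To make the two endpoint helpers added in base_endpoint.go above concrete, a small sketch; the endpoint URL is invented, and the exact trailing-slash behaviour depends on the version regexp in that file:

package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud/v2/openstack/utils"
)

func main() {
	// An invented compute endpoint carrying a version and a project ID.
	ep := "https://compute.example.com/v2.1/0123456789abcdef"

	base, _ := utils.BaseEndpoint(ep)               // version and project ID stripped
	versioned, _ := utils.BaseVersionedEndpoint(ep) // version kept, project ID stripped
	fmt.Println(base)
	fmt.Println(versioned)
}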
type linkResp struct { Href string `json:"href"` Rel string `json:"rel"` @@ -114,123 +114,3 @@ func ChooseVersion(ctx context.Context, client *gophercloud.ProviderClient, reco return highest, endpoint, nil } - -type SupportedMicroversions struct { - MaxMajor int - MaxMinor int - MinMajor int - MinMinor int -} - -// GetSupportedMicroversions returns the minimum and maximum microversion that is supported by the ServiceClient Endpoint. -func GetSupportedMicroversions(ctx context.Context, client *gophercloud.ServiceClient) (SupportedMicroversions, error) { - type valueResp struct { - ID string `json:"id"` - Status string `json:"status"` - Version string `json:"version"` - MinVersion string `json:"min_version"` - } - - type response struct { - Version valueResp `json:"version"` - Versions []valueResp `json:"versions"` - } - var minVersion, maxVersion string - var supportedMicroversions SupportedMicroversions - var resp response - _, err := client.Get(ctx, client.Endpoint, &resp, &gophercloud.RequestOpts{ - OkCodes: []int{200, 300}, - }) - - if err != nil { - return supportedMicroversions, err - } - - if len(resp.Versions) > 0 { - // We are dealing with an unversioned endpoint - // We only handle the case when there is exactly one, and assume it is the correct one - if len(resp.Versions) > 1 { - return supportedMicroversions, fmt.Errorf("unversioned endpoint with multiple alternatives not supported") - } - minVersion = resp.Versions[0].MinVersion - maxVersion = resp.Versions[0].Version - } else { - minVersion = resp.Version.MinVersion - maxVersion = resp.Version.Version - } - - // Return early if the endpoint does not support microversions - if minVersion == "" && maxVersion == "" { - return supportedMicroversions, fmt.Errorf("microversions not supported by ServiceClient Endpoint") - } - - supportedMicroversions.MinMajor, supportedMicroversions.MinMinor, err = ParseMicroversion(minVersion) - if err != nil { - return supportedMicroversions, err - } - - supportedMicroversions.MaxMajor, supportedMicroversions.MaxMinor, err = ParseMicroversion(maxVersion) - if err != nil { - return supportedMicroversions, err - } - - return supportedMicroversions, nil -} - -// RequireMicroversion checks that the required microversion is supported and -// returns a ServiceClient with the microversion set. -func RequireMicroversion(ctx context.Context, client gophercloud.ServiceClient, required string) (gophercloud.ServiceClient, error) { - supportedMicroversions, err := GetSupportedMicroversions(ctx, &client) - if err != nil { - return client, fmt.Errorf("unable to determine supported microversions: %w", err) - } - supported, err := supportedMicroversions.IsSupported(required) - if err != nil { - return client, err - } - if !supported { - return client, fmt.Errorf("microversion %s not supported. Supported versions: %v", required, supportedMicroversions) - } - client.Microversion = required - return client, nil -} - -// IsSupported checks if a microversion falls in the supported interval. -// It returns true if the version is within the interval and false otherwise. -func (supported SupportedMicroversions) IsSupported(version string) (bool, error) { - // Parse the version X.Y into X and Y integers that are easier to compare. - vMajor, vMinor, err := ParseMicroversion(version) - if err != nil { - return false, err - } - - // Check that the major version number is supported. 
- if (vMajor < supported.MinMajor) || (vMajor > supported.MaxMajor) { - return false, nil - } - - // Check that the minor version number is supported - if (vMinor <= supported.MaxMinor) && (vMinor >= supported.MinMinor) { - return true, nil - } - - return false, nil -} - -// ParseMicroversion parses the version major.minor into separate integers major and minor. -// For example, "2.53" becomes 2 and 53. -func ParseMicroversion(version string) (major int, minor int, err error) { - parts := strings.Split(version, ".") - if len(parts) != 2 { - return 0, 0, fmt.Errorf("invalid microversion format: %q", version) - } - major, err = strconv.Atoi(parts[0]) - if err != nil { - return 0, 0, err - } - minor, err = strconv.Atoi(parts[1]) - if err != nil { - return 0, 0, err - } - return major, minor, nil -} diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/discovery.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/discovery.go new file mode 100644 index 0000000000..86d1d14c34 --- /dev/null +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/discovery.go @@ -0,0 +1,372 @@ +package utils + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/gophercloud/gophercloud/v2" +) + +type Status string + +const ( + StatusCurrent Status = "CURRENT" + StatusSupported Status = "SUPPORTED" + StatusDeprecated Status = "DEPRECATED" + StatusExperimental Status = "EXPERIMENTAL" + StatusUnknown Status = "" +) + +// SupportedVersion stores a normalized form of the API version data. It handles APIs that +// support microversions as well as those that do not. +type SupportedVersion struct { + // Major is the major version number of the API + Major int + // Minor is the minor version number of the API + Minor int + // Status is the status of the API + Status Status + SupportedMicroversions +} + +// SupportedMicroversions stores a normalized form of the maximum and minimum API microversions +// supported by a given service. 
+type SupportedMicroversions struct {
+	// MaxMajor is the major version number of the maximum supported API microversion
+	MaxMajor int
+	// MaxMinor is the minor version number of the maximum supported API microversion
+	MaxMinor int
+	// MinMajor is the major version number of the minimum supported API microversion
+	MinMajor int
+	// MinMinor is the minor version number of the minimum supported API microversion
+	MinMinor int
+}
+
+type version struct {
+	ID         string `json:"id"`
+	Status     string `json:"status"`
+	Version    string `json:"version,omitempty"`
+	MaxVersion string `json:"max_version,omitempty"`
+	MinVersion string `json:"min_version"`
+}
+
+type response struct {
+	Versions []version `json:"-"`
+}
+
+func (r *response) UnmarshalJSON(in []byte) error {
+	// intermediateResponse is an intermediate struct that allows us to offload the difference
+	// between a single version document and a multi-version document to the json parser and
+	// only focus on differences in the latter
+	type intermediateResponse struct {
+		ID       string           `json:"id"`
+		Version  *version         `json:"version"`
+		Versions *json.RawMessage `json:"versions"`
+	}
+
+	data := intermediateResponse{}
+	if err := json.Unmarshal(in, &data); err != nil {
+		return err
+	}
+
+	// case 1: we have a single enveloped version object
+	//
+	// this is the approach used by Manila for single version responses
+	if data.Version != nil {
+		r.Versions = []version{*data.Version}
+		return nil
+	}
+
+	// case 2: we have a singly enveloped array of version objects
+	//
+	// this is the approach used by nova, cinder and glance, among others, for multi-version
+	// responses
+	if data.Versions != nil {
+		var versionArr []version
+		if err := json.Unmarshal(*data.Versions, &versionArr); err == nil {
+			r.Versions = versionArr
+			return nil
+		}
+	}
+
+	// case 3: we have a doubly enveloped array of version objects
+	//
+	// this is the approach used by keystone and barbican, among others, for multi-version
+	// responses
+	if data.Versions != nil {
+		type values struct {
+			Values []version `json:"values"`
+		}
+
+		var valuesObj values
+		if err := json.Unmarshal(*data.Versions, &valuesObj); err == nil {
+			r.Versions = valuesObj.Values
+			return nil
+		}
+	}
+
+	// case 4: we have a single unenveloped version object
+	//
+	// this is the approach used by most other services for single version responses
+	if data.ID != "" {
+		r.Versions = []version{{ID: data.ID}}
+		return nil
+	}
+
+	return fmt.Errorf("failed to unmarshal versions document: %s", in)
+}
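The enveloped shapes that UnmarshalJSON distinguishes are easiest to see side by side. Since response and version are unexported, this standalone sketch mirrors just enough of them, with invented sample documents:

package main

import (
	"encoding/json"
	"fmt"
)

type apiVersion struct {
	ID     string `json:"id"`
	Status string `json:"status"`
}

func main() {
	// Case 2: singly enveloped array (nova/cinder/glance style).
	var single struct {
		Versions []apiVersion `json:"versions"`
	}
	_ = json.Unmarshal([]byte(`{"versions": [{"id": "v2.1", "status": "CURRENT"}]}`), &single)

	// Case 3: doubly enveloped array (keystone/barbican style).
	var double struct {
		Versions struct {
			Values []apiVersion `json:"values"`
		} `json:"versions"`
	}
	_ = json.Unmarshal([]byte(`{"versions": {"values": [{"id": "v3.14", "status": "stable"}]}}`), &double)

	fmt.Println(single.Versions[0].ID, double.Versions.Values[0].ID) // v2.1 v3.14
}

Holding the versions key as json.RawMessage, as the implementation does, is what lets a single decoder try both array layouts in turn.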
+
+func extractVersion(endpointURL string) (int, int, error) {
+	u, err := url.Parse(endpointURL)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	parts := strings.Split(strings.TrimRight(u.Path, "/"), "/")
+	if len(parts) == 0 {
+		return 0, 0, fmt.Errorf("expected path with version, got: %s", u.Path)
+	}
+
+	// first, check the nth path element for a version string
+	if majorVersion, minorVersion, err := ParseVersion(parts[len(parts)-1]); err == nil {
+		return majorVersion, minorVersion, nil
+	}
+
+	// if there are no more parts, quit
+	if len(parts) == 1 {
+		// we don't return the error message directly since it might be misleading: at this point
+		// we might have a *malformed* version identifier rather than *no* version identifier
+		return 0, 0, fmt.Errorf("failed to infer version from path: %s", u.Path)
+	}
+
+	// the guidelines say we should use the currently scoped project_id from the token, but we
+	// don't necessarily have a token yet so we speculatively look at the (n-1)th path element
+	// (but only that) just as keystoneauth does
+	//
+	// https://github.com/openstack/keystoneauth/blob/master/keystoneauth1/discover.py#L1534-L1545
+	if majorVersion, minorVersion, err := ParseVersion(parts[len(parts)-2]); err == nil {
+		return majorVersion, minorVersion, err
+	}
+
+	// once again, we don't return the error message directly
+	return 0, 0, fmt.Errorf("failed to infer version from path: %s", u.Path)
+}
+
+// GetServiceVersions returns the versions supported by the ServiceClient Endpoint.
+// If the endpoint resolves to an unversioned discovery API, this should return one or more supported versions.
+// If the endpoint resolves to a versioned discovery API, this should return exactly one supported version.
+func GetServiceVersions(ctx context.Context, client *gophercloud.ProviderClient, endpointURL string, discoverVersions bool) ([]SupportedVersion, error) {
+	var supportedVersions []SupportedVersion
+	var endpointVersion *SupportedVersion
+
+	if majorVersion, minorVersion, err := extractVersion(endpointURL); err == nil {
+		endpointVersion = &SupportedVersion{Major: majorVersion, Minor: minorVersion}
+		if !discoverVersions {
+			return append(supportedVersions, *endpointVersion), nil
+		}
+	}
+
+	var resp response
+	_, err := client.Request(ctx, "GET", endpointURL, &gophercloud.RequestOpts{
+		JSONResponse: &resp,
+		OkCodes:      []int{200, 300},
+	})
+	if err != nil {
+		// we weren't able to find a discovery document but we have version information from the URL
+		if endpointVersion != nil {
+			return append(supportedVersions, *endpointVersion), nil
+		}
+		return supportedVersions, err
+	}
+
+	versions := resp.Versions
+
+	for _, version := range versions {
+		majorVersion, minorVersion, err := ParseVersion(version.ID)
+		if err != nil {
+			return supportedVersions, err
+		}
+
+		status, err := ParseStatus(version.Status)
+		if err != nil {
+			return supportedVersions, err
+		}
+
+		supportedVersion := SupportedVersion{
+			Major:  majorVersion,
+			Minor:  minorVersion,
+			Status: status,
+		}
+
+		// Only normalize the microversions if there are microversions to normalize
+		if (version.Version != "" || version.MaxVersion != "") && version.MinVersion != "" {
+			supportedVersion.MinMajor, supportedVersion.MinMinor, err = ParseMicroversion(version.MinVersion)
+			if err != nil {
+				return supportedVersions, err
+			}
+
+			maxVersion := version.Version
+			if maxVersion == "" {
+				maxVersion = version.MaxVersion
+			}
+			supportedVersion.MaxMajor, supportedVersion.MaxMinor, err = ParseMicroversion(maxVersion)
+			if err != nil {
+				return supportedVersions, err
+			}
+		}
+
+		supportedVersions = append(supportedVersions, supportedVersion)
+	}
+
+	sort.Slice(supportedVersions, func(i, j int) bool {
+		return supportedVersions[i].Major > supportedVersions[j].Major || (supportedVersions[i].Major == supportedVersions[j].Major &&
+			supportedVersions[i].Minor > supportedVersions[j].Minor)
+	})
+
+	return supportedVersions, nil
+}
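A hedged sketch of calling the new discovery entry point; provider is assumed to be an authenticated *gophercloud.ProviderClient, and the helper name and endpoint URL are invented:

package example

import (
	"context"
	"fmt"

	"github.com/gophercloud/gophercloud/v2"
	"github.com/gophercloud/gophercloud/v2/openstack/utils"
)

// printServiceVersions lists what an endpoint supports. With
// discoverVersions set to false and a versioned URL, the answer is inferred
// from the path alone and no discovery request is issued.
func printServiceVersions(ctx context.Context, provider *gophercloud.ProviderClient) error {
	versions, err := utils.GetServiceVersions(ctx, provider, "https://compute.example.com/v2.1/", false)
	if err != nil {
		return err
	}
	for _, v := range versions {
		fmt.Printf("v%d.%d status=%q microversions=%d.%d..%d.%d\n",
			v.Major, v.Minor, v.Status,
			v.MinMajor, v.MinMinor, v.MaxMajor, v.MaxMinor)
	}
	return nil
}

GetSupportedMicroversions, defined next, builds on this and deliberately fails when handed an unversioned endpoint that advertises several alternatives.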
+
+// GetSupportedMicroversions returns the minimum and maximum microversion that is supported by the ServiceClient Endpoint.
+func GetSupportedMicroversions(ctx context.Context, client *gophercloud.ServiceClient) (SupportedMicroversions, error) {
+	var supportedMicroversions SupportedMicroversions
+
+	supportedVersions, err := GetServiceVersions(ctx, client.ProviderClient, client.Endpoint, true)
+	if err != nil {
+		return supportedMicroversions, err
+	}
+
+	// If there are multiple versions then we were handed an unversioned endpoint. These don't
+	// provide microversion information, so we need to fail. Likewise, if there are no versions
+	// then something has gone wrong and we also need to fail.
+	if len(supportedVersions) > 1 {
+		return supportedMicroversions, fmt.Errorf("unversioned endpoint with multiple alternatives not supported")
+	} else if len(supportedVersions) == 0 {
+		return supportedMicroversions, fmt.Errorf("microversions not supported by endpoint")
+	}
+
+	supportedMicroversions = supportedVersions[0].SupportedMicroversions
+
+	if supportedMicroversions.MaxMajor == 0 &&
+		supportedMicroversions.MaxMinor == 0 &&
+		supportedMicroversions.MinMajor == 0 &&
+		supportedMicroversions.MinMinor == 0 {
+		return supportedMicroversions, fmt.Errorf("microversions not supported by endpoint")
+	}
+
+	return supportedMicroversions, err
+}
+
+// RequireMicroversion checks that the required microversion is supported and
+// returns a ServiceClient with the microversion set.
+func RequireMicroversion(ctx context.Context, client gophercloud.ServiceClient, required string) (gophercloud.ServiceClient, error) {
+	supportedMicroversions, err := GetSupportedMicroversions(ctx, &client)
+	if err != nil {
+		return client, fmt.Errorf("unable to determine supported microversions: %w", err)
+	}
+	supported, err := supportedMicroversions.IsSupported(required)
+	if err != nil {
+		return client, err
+	}
+	if !supported {
+		return client, fmt.Errorf("microversion %s not supported. Supported versions: %v", required, supportedMicroversions)
+	}
+	client.Microversion = required
+	return client, nil
+}
+
+// IsSupported checks if a microversion falls in the supported interval.
+// It returns true if the version is within the interval and false otherwise.
+func (supported SupportedMicroversions) IsSupported(version string) (bool, error) {
+	// Parse the version X.Y into X and Y integers that are easier to compare.
+	vMajor, vMinor, err := ParseMicroversion(version)
+	if err != nil {
+		return false, err
+	}
+
+	// Check that the major version number is supported.
+	if (vMajor < supported.MinMajor) || (vMajor > supported.MaxMajor) {
+		return false, nil
+	}
+
+	// Check that the minor version number is supported
+	if (vMinor <= supported.MaxMinor) && (vMinor >= supported.MinMinor) {
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// ParseVersion parses the version strings v{MAJOR} and v{MAJOR}.{MINOR} into separate integers
+// major and minor.
+// For example, "v2.1" becomes 2 and 1, "v3" becomes 3 and 0, and "1" becomes 1 and 0.
+func ParseVersion(version string) (major, minor int, err error) {
+	if version == "" {
+		return 0, 0, fmt.Errorf("empty version provided")
+	}
+
+	// We use the regex indicated by the version discovery guidelines.
+	//
+	// https://specs.openstack.org/openstack/api-sig/guidelines/consuming-catalog/version-discovery.html#inferring-version
+	//
+	// However, we diverge slightly since not all services include the 'v' prefix (glares at zaqar)
+	versionRe := regexp.MustCompile(`^v?(?P<major>[0-9]+)(\.(?P<minor>[0-9]+))?$`)
+
+	match := versionRe.FindStringSubmatch(version)
+	if len(match) == 0 {
+		return 0, 0, fmt.Errorf("invalid format: %q", version)
+	}
+
+	major, err = strconv.Atoi(match[versionRe.SubexpIndex("major")])
+	if err != nil {
+		return 0, 0, err
+	}
+
+	minor = 0
+	if match[versionRe.SubexpIndex("minor")] != "" {
+		minor, err = strconv.Atoi(match[versionRe.SubexpIndex("minor")])
+		if err != nil {
+			return 0, 0, err
+		}
+	}
+
+	return major, minor, nil
+}
+
+// ParseMicroversion parses the version major.minor into separate integers major and minor.
+// For example, "2.53" becomes 2 and 53.
+func ParseMicroversion(version string) (major int, minor int, err error) { + parts := strings.Split(version, ".") + if len(parts) != 2 { + return 0, 0, fmt.Errorf("invalid microversion format: %q", version) + } + major, err = strconv.Atoi(parts[0]) + if err != nil { + return 0, 0, err + } + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return 0, 0, err + } + return major, minor, nil +} + +func ParseStatus(status string) (Status, error) { + switch strings.ToUpper(status) { + case "CURRENT", "STABLE": // keystone uses STABLE instead of CURRENT + return StatusCurrent, nil + case "SUPPORTED": + return StatusSupported, nil + case "DEPRECATED": + return StatusDeprecated, nil + case "": + return StatusUnknown, nil + default: + return StatusUnknown, fmt.Errorf("invalid status: %q", status) + } +} diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go index 52fcd38ab3..9048e83def 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go @@ -7,13 +7,14 @@ import ( "errors" "io" "net/http" + "slices" "strings" "sync" ) // DefaultUserAgent is the default User-Agent string set in the request header. const ( - DefaultUserAgent = "gophercloud/v2.7.0" + DefaultUserAgent = "gophercloud/v2.9.0" DefaultMaxBackoffRetries = 60 ) @@ -437,16 +438,8 @@ func (client *ProviderClient) doRequest(ctx context.Context, method, url string, okc = defaultOkCodes(method) } - // Validate the HTTP response status. - var ok bool - for _, code := range okc { - if resp.StatusCode == code { - ok = true - break - } - } - - if !ok { + // Check the response code against the acceptable codes + if !slices.Contains(okc, resp.StatusCode) { body, _ := io.ReadAll(resp.Body) resp.Body.Close() respErr := ErrUnexpectedResponseCode{ diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/service_client.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/service_client.go index c1f9f41d4d..015c3f2339 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/service_client.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/service_client.go @@ -130,6 +130,9 @@ func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { opts.MoreHeaders["X-OpenStack-Ironic-API-Version"] = client.Microversion case "baremetal-introspection": opts.MoreHeaders["X-OpenStack-Ironic-Inspector-API-Version"] = client.Microversion + case "container-infrastructure-management", "container-infrastructure", "container-infra": + // magnum should accept container-infrastructure-management but (as of Epoxy) does not + serviceType = "container-infra" } if client.Type != "" { diff --git a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/util.go b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/util.go index ad8a7dfaaa..d11a723b1b 100644 --- a/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/util.go +++ b/hack/tools/vendor/github.com/gophercloud/gophercloud/v2/util.go @@ -37,9 +37,6 @@ func NormalizePathURL(basePath, rawPath string) (string, error) { absPathSys = filepath.Join(basePath, rawPath) u.Path = filepath.ToSlash(absPathSys) - if err != nil { - return "", err - } u.Scheme = "file" return u.String(), nil } diff --git a/hack/tools/vendor/github.com/hashicorp/go-version/LICENSE b/hack/tools/vendor/github.com/hashicorp/go-version/LICENSE index 1409d6ab92..bb1e9a486a 
100644 --- a/hack/tools/vendor/github.com/hashicorp/go-version/LICENSE +++ b/hack/tools/vendor/github.com/hashicorp/go-version/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014 HashiCorp, Inc. +Copyright IBM Corp. 2014, 2025 Mozilla Public License, version 2.0 diff --git a/hack/tools/vendor/github.com/hashicorp/go-version/README.md b/hack/tools/vendor/github.com/hashicorp/go-version/README.md index 4b7806cd96..83a8249f72 100644 --- a/hack/tools/vendor/github.com/hashicorp/go-version/README.md +++ b/hack/tools/vendor/github.com/hashicorp/go-version/README.md @@ -1,6 +1,7 @@ # Versioning Library for Go + ![Build Status](https://github.com/hashicorp/go-version/actions/workflows/go-tests.yml/badge.svg) -[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-version.svg)](https://pkg.go.dev/github.com/hashicorp/go-version) go-version is a library for parsing versions and version constraints, and verifying versions against a set of constraints. go-version @@ -12,7 +13,7 @@ Versions used with go-version must follow [SemVer](http://semver.org/). ## Installation and Usage Package documentation can be found on -[GoDoc](http://godoc.org/github.com/hashicorp/go-version). +[Go Reference](https://pkg.go.dev/github.com/hashicorp/go-version). Installation can be done with a normal `go get`: diff --git a/hack/tools/vendor/github.com/hashicorp/go-version/constraint.go b/hack/tools/vendor/github.com/hashicorp/go-version/constraint.go index 29bdc4d2b5..3964da070d 100644 --- a/hack/tools/vendor/github.com/hashicorp/go-version/constraint.go +++ b/hack/tools/vendor/github.com/hashicorp/go-version/constraint.go @@ -1,4 +1,4 @@ -// Copyright (c) HashiCorp, Inc. +// Copyright IBM Corp. 2014, 2025 // SPDX-License-Identifier: MPL-2.0 package version @@ -8,8 +8,26 @@ import ( "regexp" "sort" "strings" + "sync" ) +var ( + constraintRegexp *regexp.Regexp + constraintRegexpOnce sync.Once +) + +func getConstraintRegexp() *regexp.Regexp { + constraintRegexpOnce.Do(func() { + // This heavy lifting only happens the first time this function is called + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + `<=|>=|!=|~>|<|>|=|`, + VersionRegexpRaw, + )) + }) + return constraintRegexp +} + // Constraint represents a single constraint for a version, such as // ">= 1.0". type Constraint struct { @@ -29,38 +47,11 @@ type Constraints []*Constraint type constraintFunc func(v, c *Version) bool -var constraintOperators map[string]constraintOperation - type constraintOperation struct { op operator f constraintFunc } -var constraintRegexp *regexp.Regexp - -func init() { - constraintOperators = map[string]constraintOperation{ - "": {op: equal, f: constraintEqual}, - "=": {op: equal, f: constraintEqual}, - "!=": {op: notEqual, f: constraintNotEqual}, - ">": {op: greaterThan, f: constraintGreaterThan}, - "<": {op: lessThan, f: constraintLessThan}, - ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, - "<=": {op: lessThanEqual, f: constraintLessThanEqual}, - "~>": {op: pessimistic, f: constraintPessimistic}, - } - - ops := make([]string, 0, len(constraintOperators)) - for k := range constraintOperators { - ops = append(ops, regexp.QuoteMeta(k)) - } - - constraintRegexp = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - strings.Join(ops, "|"), - VersionRegexpRaw)) -} - // NewConstraint will parse one or more constraints from the given // constraint string. 
The string must be a comma-separated list of // constraints. @@ -107,7 +98,7 @@ func (cs Constraints) Check(v *Version) bool { // to '>0.2' it is *NOT* treated as equal. // // Missing operator is treated as equal to '=', whitespaces -// are ignored and constraints are sorted before comaparison. +// are ignored and constraints are sorted before comparison. func (cs Constraints) Equals(c Constraints) bool { if len(cs) != len(c) { return false @@ -176,9 +167,9 @@ func (c *Constraint) String() string { } func parseSingle(v string) (*Constraint, error) { - matches := constraintRegexp.FindStringSubmatch(v) + matches := getConstraintRegexp().FindStringSubmatch(v) if matches == nil { - return nil, fmt.Errorf("Malformed constraint: %s", v) + return nil, fmt.Errorf("malformed constraint: %s", v) } check, err := NewVersion(matches[2]) @@ -186,7 +177,25 @@ func parseSingle(v string) (*Constraint, error) { return nil, err } - cop := constraintOperators[matches[1]] + var cop constraintOperation + switch matches[1] { + case "=": + cop = constraintOperation{op: equal, f: constraintEqual} + case "!=": + cop = constraintOperation{op: notEqual, f: constraintNotEqual} + case ">": + cop = constraintOperation{op: greaterThan, f: constraintGreaterThan} + case "<": + cop = constraintOperation{op: lessThan, f: constraintLessThan} + case ">=": + cop = constraintOperation{op: greaterThanEqual, f: constraintGreaterThanEqual} + case "<=": + cop = constraintOperation{op: lessThanEqual, f: constraintLessThanEqual} + case "~>": + cop = constraintOperation{op: pessimistic, f: constraintPessimistic} + default: + cop = constraintOperation{op: equal, f: constraintEqual} + } return &Constraint{ f: cop.f, diff --git a/hack/tools/vendor/github.com/hashicorp/go-version/version.go b/hack/tools/vendor/github.com/hashicorp/go-version/version.go index 7c683c2813..17b29732ee 100644 --- a/hack/tools/vendor/github.com/hashicorp/go-version/version.go +++ b/hack/tools/vendor/github.com/hashicorp/go-version/version.go @@ -1,23 +1,39 @@ -// Copyright (c) HashiCorp, Inc. +// Copyright IBM Corp. 2014, 2025 // SPDX-License-Identifier: MPL-2.0 package version import ( - "bytes" "database/sql/driver" "fmt" "regexp" "strconv" "strings" + "sync" ) // The compiled regular expression used to test the validity of a version. var ( - versionRegexp *regexp.Regexp - semverRegexp *regexp.Regexp + versionRegexp *regexp.Regexp + versionRegexpOnce sync.Once + semverRegexp *regexp.Regexp + semverRegexpOnce sync.Once ) +func getVersionRegexp() *regexp.Regexp { + versionRegexpOnce.Do(func() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + }) + return versionRegexp +} + +func getSemverRegexp() *regexp.Regexp { + semverRegexpOnce.Do(func() { + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") + }) + return semverRegexp +} + // The raw regular expression string used for testing the validity // of a version. const ( @@ -42,28 +58,23 @@ type Version struct { original string } -func init() { - versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") - semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") -} - // NewVersion parses the given version and returns a new // Version. 
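Nothing changes behaviourally for callers of the constraint API; the shared regexp is now simply compiled on first use. A small sketch with invented inputs:

package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// The first NewConstraint call compiles the constraint regexp via
	// sync.Once; later calls reuse the compiled pattern.
	c, err := version.NewConstraint(">= 1.2, < 2.0")
	if err != nil {
		panic(err)
	}
	v, err := version.NewVersion("1.5.2")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.Check(v)) // true
}

NewVersion and NewSemver get the same lazy treatment through getVersionRegexp and getSemverRegexp, shown next.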
func NewVersion(v string) (*Version, error) { - return newVersion(v, versionRegexp) + return newVersion(v, getVersionRegexp()) } // NewSemver parses the given version and returns a new // Version that adheres strictly to SemVer specs // https://semver.org/ func NewSemver(v string) (*Version, error) { - return newVersion(v, semverRegexp) + return newVersion(v, getSemverRegexp()) } func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { matches := pattern.FindStringSubmatch(v) if matches == nil { - return nil, fmt.Errorf("Malformed version: %s", v) + return nil, fmt.Errorf("malformed version: %s", v) } segmentsStr := strings.Split(matches[1], ".") segments := make([]int64, len(segmentsStr)) @@ -71,7 +82,7 @@ func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { val, err := strconv.ParseInt(str, 10, 64) if err != nil { return nil, fmt.Errorf( - "Error parsing version: %s", err) + "error parsing version: %s", err) } segments[i] = val @@ -174,7 +185,7 @@ func (v *Version) Compare(other *Version) int { } else if lhs < rhs { return -1 } - // Otherwis, rhs was > lhs, they're not equal + // Otherwise, rhs was > lhs, they're not equal return 1 } @@ -382,22 +393,29 @@ func (v *Version) Segments64() []int64 { // missing parts (1.0 => 1.0.0) will be made into a canonicalized form // as shown in the parenthesized examples. func (v *Version) String() string { - var buf bytes.Buffer - fmtParts := make([]string, len(v.segments)) + return string(v.bytes()) +} + +func (v *Version) bytes() []byte { + var buf []byte for i, s := range v.segments { - // We can ignore err here since we've pre-parsed the values in segments - str := strconv.FormatInt(s, 10) - fmtParts[i] = str + if i > 0 { + buf = append(buf, '.') + } + buf = strconv.AppendInt(buf, s, 10) } - fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { - fmt.Fprintf(&buf, "-%s", v.pre) + buf = append(buf, '-') + buf = append(buf, v.pre...) } + if v.metadata != "" { - fmt.Fprintf(&buf, "+%s", v.metadata) + buf = append(buf, '+') + buf = append(buf, v.metadata...) } - return buf.String() + return buf } // Original returns the original parsed version as-is, including any diff --git a/hack/tools/vendor/github.com/hashicorp/go-version/version_collection.go b/hack/tools/vendor/github.com/hashicorp/go-version/version_collection.go index 83547fe13d..11bc8b1c56 100644 --- a/hack/tools/vendor/github.com/hashicorp/go-version/version_collection.go +++ b/hack/tools/vendor/github.com/hashicorp/go-version/version_collection.go @@ -1,4 +1,4 @@ -// Copyright (c) HashiCorp, Inc. +// Copyright IBM Corp. 
2014, 2025
 // SPDX-License-Identifier: MPL-2.0

 package version
diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go
new file mode 100644
index 0000000000..ee6ac7b5f3
--- /dev/null
+++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go
@@ -0,0 +1,8 @@
+//go:build !go1.25
+// +build !go1.25
+
+package main
+
+import (
+	_ "github.com/onsi/ginkgo/v2/ginkgo/automaxprocs"
+)
diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md
new file mode 100644
index 0000000000..e249ebe8b3
--- /dev/null
+++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md
@@ -0,0 +1,3 @@
+This entire directory is a lightly modified clone of https://github.com/uber-go/automaxprocs
+
+It will be removed when Go 1.26 ships and we no longer need to support Go 1.24 (which does not correctly autodetect maxprocs in containers).
diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go
new file mode 100644
index 0000000000..8a762b51d6
--- /dev/null
+++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package automaxprocs lets Go programs easily configure runtime.GOMAXPROCS
+// to match the configured Linux CPU quota. It is a trimmed-down clone of
+// go.uber.org/automaxprocs, wired up via init rather than caller options.
+package automaxprocs
+
+import (
+	"os"
+	"runtime"
+)
+
+func init() {
+	Set()
+}
+
+const _maxProcsKey = "GOMAXPROCS"
+
+type config struct {
+	procs          func(int, func(v float64) int) (int, CPUQuotaStatus, error)
+	minGOMAXPROCS  int
+	roundQuotaFunc func(v float64) int
+}
+
+// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning
+// any error encountered.
+//
+// Set is a no-op on non-Linux systems and in Linux environments without a
+// configured CPU quota.
+func Set() error {
+	cfg := &config{
+		procs:          CPUQuotaToGOMAXPROCS,
+		roundQuotaFunc: DefaultRoundFunc,
+		minGOMAXPROCS:  1,
+	}
+
+	// Honor the GOMAXPROCS environment variable if present. Otherwise, amend
+	// `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is
+	// Linux, and guarantee a minimum value of 1 (the minGOMAXPROCS field;
+	// this clone does not expose the upstream `maxprocs.Min()` option).
+	if _, exists := os.LookupEnv(_maxProcsKey); exists {
+		return nil
+	}
+	maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc)
+	if err != nil {
+		return err
+	}
+	if status == CPUQuotaUndefined {
+		return nil
+	}
+	runtime.GOMAXPROCS(maxProcs)
+	return nil
+}
diff --git a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go
similarity index 99%
rename from openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go
rename to hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go
index fe4ecf561e..a4676933e8 100644
--- a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go
+++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go
@@ -21,7 +21,7 @@
 //go:build linux
 // +build linux

-package cgroups
+package automaxprocs

 import (
 	"bufio"
diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go
similarity index 99%
rename from hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go
rename to hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go
index e89f543602..ed384891ef 100644
--- a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go
+++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go
@@ -21,7 +21,7 @@
 //go:build linux
 // +build linux

-package cgroups
+package automaxprocs

 const (
 	// _cgroupFSType is the Linux CGroup file system type used in
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go
similarity index 99%
rename from vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go
rename to hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go
index 78556062fe..69a0be6b71 100644
--- a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go
+++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go
@@ -21,7 +21,7 @@
 //go:build linux
 // +build linux

-package cgroups
+package automaxprocs

 import (
 	"bufio"
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go
similarity index 91%
rename from vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
rename to hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go
index f9057fd273..2d83343bd9 100644
--- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
+++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go
@@ -21,12 +21,10 @@
 //go:build linux
 // +build linux

-package runtime
+package automaxprocs

 import (
 	"errors"
-
-	cg "go.uber.org/automaxprocs/internal/cgroups"
 )

 // CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
@@ -58,8 +56,8 @@ type queryer interface {
 }

 var (
-	_newCgroups2 = cg.NewCGroups2ForCurrentProcess
-	_newCgroups  = cg.NewCGroupsForCurrentProcess
+	_newCgroups2 = NewCGroups2ForCurrentProcess
+	_newCgroups  = NewCGroupsForCurrentProcess
 	_newQueryer  = newQueryer
 )

@@ -68,7 +66,7 @@ 
func newQueryer() (queryer, error) { if err == nil { return cgroups, nil } - if errors.Is(err, cg.ErrNotV2) { + if errors.Is(err, ErrNotV2) { return _newCgroups() } return nil, err diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go similarity index 98% rename from vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go rename to hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go index e74701508e..d2d61e8941 100644 --- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go @@ -21,7 +21,7 @@ //go:build !linux // +build !linux -package runtime +package automaxprocs // CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process // to a valid GOMAXPROCS value. This is Linux-specific and not supported in the diff --git a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go similarity index 98% rename from openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go rename to hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go index 94ac75a46e..2e235d7d65 100644 --- a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import "fmt" diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go similarity index 99% rename from hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go rename to hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go index f3877f78aa..7c3fa306ef 100644 --- a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import ( "bufio" diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go similarity index 98% rename from vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go rename to hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go index f8a2834ac0..b8ec7e502a 100644 --- a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
-package runtime +package automaxprocs import "math" diff --git a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go similarity index 99% rename from openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go rename to hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go index cddc3eaec3..881ebd5902 100644 --- a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import ( "bufio" diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index 2b36b2feb9..3021dfec2e 100644 --- a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -29,7 +29,6 @@ func BuildBuildCommand() command.Command { var errors []error cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) command.AbortIfErrors("Ginkgo detected configuration issues:", errors) - buildSpecs(args, cliConfig, goFlagsConfig) }, } diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index 8e16d2bb03..f3439a3f0c 100644 --- a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -90,6 +90,9 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC if reporterConfig.JSONReport != "" { reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports}) } + if reporterConfig.GoJSONReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.GoJSONReport, GenerateFunc: reporters.GenerateGoTestJSONReport, MergeFunc: reporters.MergeAndCleanupGoTestJSONReports}) + } if reporterConfig.JUnitReport != "" { reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports}) } diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go index 41052ea19d..48c69a1d83 100644 --- a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -9,6 +9,7 @@ import ( "path/filepath" "regexp" "strings" + "sync/atomic" "syscall" "time" @@ -107,6 +108,9 @@ func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig t if reporterConfig.JSONReport != "" { reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) } + if reporterConfig.GoJSONReport != "" { + reporterConfig.GoJSONReport = AbsPathForGeneratedAsset(reporterConfig.GoJSONReport, suite, cliConfig, 0) + } if reporterConfig.JUnitReport != "" { reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) } @@ -156,12 +160,15 @@ func runSerial(suite 
TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig t func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { type procResult struct { + proc int + exitResult string passed bool hasProgrammaticFocus bool } numProcs := cliConfig.ComputedProcs() procOutput := make([]*bytes.Buffer, numProcs) + procExitResult := make([]string, numProcs) coverProfiles := []string{} blockProfiles := []string{} @@ -179,6 +186,9 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig if reporterConfig.JSONReport != "" { reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) } + if reporterConfig.GoJSONReport != "" { + reporterConfig.GoJSONReport = AbsPathForGeneratedAsset(reporterConfig.GoJSONReport, suite, cliConfig, 0) + } if reporterConfig.JUnitReport != "" { reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) } @@ -218,16 +228,20 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig args = append(args, additionalArgs...) cmd, buf := buildAndStartCommand(suite, args, false) + var exited atomic.Bool procOutput[proc-1] = buf - server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() }) + server.RegisterAlive(proc, func() bool { return !exited.Load() }) go func() { cmd.Wait() exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() procResults <- procResult{ + proc: proc, + exitResult: cmd.ProcessState.String(), passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE), hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE, } + exited.Store(true) }() } @@ -236,6 +250,7 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig result := <-procResults passed = passed && result.passed suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus + procExitResult[result.proc-1] = result.exitResult } if passed { suite.State = TestSuiteStatePassed @@ -255,6 +270,8 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc)) fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String())) + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Exit result of proc %d:{{/}}\n", proc)) + fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s\n", procExitResult[proc-1])) } fmt.Fprintf(os.Stderr, "** End **") } diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go index bd6b8fbff3..419589b48c 100644 --- a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -3,7 +3,6 @@ package main import ( "fmt" "os" - _ "go.uber.org/automaxprocs" "github.com/onsi/ginkgo/v2/ginkgo/build" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/generators" diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go index a34d94354d..75cbdb4962 100644 --- a/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go +++ 
b/hack/tools/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -2,12 +2,9 @@ package watch import ( "go/build" - "regexp" + "strings" ) -var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) -var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing - type Dependencies struct { deps map[string]int } @@ -78,7 +75,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) { if err != nil { continue } - if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) { + if !pkg.Goroot && (!matchesGinkgoOrGomega(pkg.Dir) || matchesGinkgoIntegration(pkg.Dir)) { d.addDepIfNotPresent(pkg.Dir, depth) } } @@ -90,3 +87,11 @@ func (d Dependencies) addDepIfNotPresent(dep string, depth int) { d.deps[dep] = depth } } + +func matchesGinkgoOrGomega(s string) bool { + return strings.Contains(s, "github.com/onsi/ginkgo") || strings.Contains(s, "github.com/onsi/gomega") +} + +func matchesGinkgoIntegration(s string) bool { + return strings.Contains(s, "github.com/onsi/ginkgo/integration") // allow us to integration test this thing +} diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go new file mode 100644 index 0000000000..8b7a9ceabf --- /dev/null +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go @@ -0,0 +1,158 @@ +package reporters + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/go/packages" +) + +func ptr[T any](in T) *T { + return &in +} + +type encoder interface { + Encode(v any) error +} + +// gojsonEvent matches the format from go internals +// https://github.com/golang/go/blob/master/src/cmd/internal/test2json/test2json.go#L31-L41 +// https://pkg.go.dev/cmd/test2json +type gojsonEvent struct { + Time *time.Time `json:",omitempty"` + Action GoJSONAction + Package string `json:",omitempty"` + Test string `json:",omitempty"` + Elapsed *float64 `json:",omitempty"` + Output *string `json:",omitempty"` + FailedBuild string `json:",omitempty"` +} + +type GoJSONAction string + +const ( + // start - the test binary is about to be executed + GoJSONStart GoJSONAction = "start" + // run - the test has started running + GoJSONRun GoJSONAction = "run" + // pause - the test has been paused + GoJSONPause GoJSONAction = "pause" + // cont - the test has continued running + GoJSONCont GoJSONAction = "cont" + // pass - the test passed + GoJSONPass GoJSONAction = "pass" + // bench - the benchmark printed log output but did not fail + GoJSONBench GoJSONAction = "bench" + // fail - the test or benchmark failed + GoJSONFail GoJSONAction = "fail" + // output - the test printed output + GoJSONOutput GoJSONAction = "output" + // skip - the test was skipped or the package contained no tests + GoJSONSkip GoJSONAction = "skip" +) + +func goJSONActionFromSpecState(state types.SpecState) GoJSONAction { + switch state { + case types.SpecStateInvalid: + return GoJSONFail + case types.SpecStatePending: + return GoJSONSkip + case types.SpecStateSkipped: + return GoJSONSkip + case types.SpecStatePassed: + return GoJSONPass + case types.SpecStateFailed: + return GoJSONFail + case types.SpecStateAborted: + return GoJSONFail + case types.SpecStatePanicked: + return GoJSONFail + case types.SpecStateInterrupted: + return GoJSONFail + case 
types.SpecStateTimedout: + return GoJSONFail + default: + panic("unexpected state should not happen") + } +} + +// gojsonReport wraps types.Report and calculates extra fields required by gojson +type gojsonReport struct { + o types.Report + // Extra calculated fields + goPkg string + elapsed float64 +} + +func newReport(in types.Report) *gojsonReport { + return &gojsonReport{ + o: in, + } +} + +func (r *gojsonReport) Fill() error { + // NOTE: could the types.Report include the go package name? + goPkg, err := suitePathToPkg(r.o.SuitePath) + if err != nil { + return err + } + r.goPkg = goPkg + r.elapsed = r.o.RunTime.Seconds() + return nil +} + +// gojsonSpecReport wraps types.SpecReport and calculates extra fields required by gojson +type gojsonSpecReport struct { + o types.SpecReport + // extra calculated fields + testName string + elapsed float64 + action GoJSONAction +} + +func newSpecReport(in types.SpecReport) *gojsonSpecReport { + return &gojsonSpecReport{ + o: in, + } +} + +func (sr *gojsonSpecReport) Fill() error { + sr.elapsed = sr.o.RunTime.Seconds() + sr.testName = createTestName(sr.o) + sr.action = goJSONActionFromSpecState(sr.o.State) + return nil +} + +func suitePathToPkg(dir string) (string, error) { + cfg := &packages.Config{ + Mode: packages.NeedFiles | packages.NeedSyntax, + } + pkgs, err := packages.Load(cfg, dir) + if err != nil { + return "", err + } + if len(pkgs) != 1 { + return "", errors.New("could not resolve suite path to exactly one go package") + } + return pkgs[0].ID, nil +} + +func createTestName(spec types.SpecReport) string { + name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } + name = strings.TrimSpace(name) + return name +} diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go new file mode 100644 index 0000000000..ec5311d069 --- /dev/null +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go @@ -0,0 +1,111 @@ +package reporters + +type GoJSONEventWriter struct { + enc encoder + specSystemErrFn specSystemExtractFn + specSystemOutFn specSystemExtractFn +} + +func NewGoJSONEventWriter(enc encoder, errFn specSystemExtractFn, outFn specSystemExtractFn) *GoJSONEventWriter { + return &GoJSONEventWriter{ + enc: enc, + specSystemErrFn: errFn, + specSystemOutFn: outFn, + } +} + +func (r *GoJSONEventWriter) writeEvent(e *gojsonEvent) error { + return r.enc.Encode(e) +} + +func (r *GoJSONEventWriter) WriteSuiteStart(report *gojsonReport) error { + e := &gojsonEvent{ + Time: &report.o.StartTime, + Action: GoJSONStart, + Package: report.goPkg, + Output: nil, + FailedBuild: "", + } + return r.writeEvent(e) +} + +func (r *GoJSONEventWriter) WriteSuiteResult(report *gojsonReport) error { + var action GoJSONAction + switch { + case report.o.PreRunStats.SpecsThatWillRun == 0: + action = GoJSONSkip + case report.o.SuiteSucceeded: + action = GoJSONPass + default: + action = GoJSONFail + } + e := &gojsonEvent{ + Time: &report.o.EndTime, + Action: action, + Package: report.goPkg, + Output: nil, + FailedBuild: "", + Elapsed: ptr(report.elapsed), + } + return r.writeEvent(e) +} + +func (r *GoJSONEventWriter) WriteSpecStart(report
*gojsonReport, specReport *gojsonSpecReport) error { + e := &gojsonEvent{ + Time: &specReport.o.StartTime, + Action: GoJSONRun, + Test: specReport.testName, + Package: report.goPkg, + Output: nil, + FailedBuild: "", + } + return r.writeEvent(e) +} + +func (r *GoJSONEventWriter) WriteSpecOut(report *gojsonReport, specReport *gojsonSpecReport) error { + events := []*gojsonEvent{} + + stdErr := r.specSystemErrFn(specReport.o) + if stdErr != "" { + events = append(events, &gojsonEvent{ + Time: &specReport.o.EndTime, + Action: GoJSONOutput, + Test: specReport.testName, + Package: report.goPkg, + Output: ptr(stdErr), + FailedBuild: "", + }) + } + stdOut := r.specSystemOutFn(specReport.o) + if stdOut != "" { + events = append(events, &gojsonEvent{ + Time: &specReport.o.EndTime, + Action: GoJSONOutput, + Test: specReport.testName, + Package: report.goPkg, + Output: ptr(stdOut), + FailedBuild: "", + }) + } + + for _, ev := range events { + err := r.writeEvent(ev) + if err != nil { + return err + } + } + return nil +} + +func (r *GoJSONEventWriter) WriteSpecResult(report *gojsonReport, specReport *gojsonSpecReport) error { + e := &gojsonEvent{ + Time: &specReport.o.EndTime, + Action: specReport.action, + Test: specReport.testName, + Package: report.goPkg, + Elapsed: ptr(specReport.elapsed), + Output: nil, + FailedBuild: "", + } + return r.writeEvent(e) +} diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go new file mode 100644 index 0000000000..633e49b88d --- /dev/null +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go @@ -0,0 +1,45 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +type GoJSONReporter struct { + ev *GoJSONEventWriter +} + +type specSystemExtractFn func (spec types.SpecReport) string + +func NewGoJSONReporter(enc encoder, errFn specSystemExtractFn, outFn specSystemExtractFn) *GoJSONReporter { + return &GoJSONReporter{ + ev: NewGoJSONEventWriter(enc, errFn, outFn), + } +} + +func (r *GoJSONReporter) Write(originalReport types.Report) error { + // suite start events + report := newReport(originalReport) + err := report.Fill() + if err != nil { + return err + } + r.ev.WriteSuiteStart(report) + for _, originalSpecReport := range originalReport.SpecReports { + specReport := newSpecReport(originalSpecReport) + err := specReport.Fill() + if err != nil { + return err + } + if specReport.o.LeafNodeType == types.NodeTypeIt { + // handle any It leaf node as a spec + r.ev.WriteSpecStart(report, specReport) + r.ev.WriteSpecOut(report, specReport) + r.ev.WriteSpecResult(report, specReport) + } else { + // handle any other leaf node as generic output + r.ev.WriteSpecOut(report, specReport) + } + } + r.ev.WriteSuiteResult(report) + return nil +} diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 74ad0768b7..026d9cf9b3 100644 --- a/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -72,6 +72,9 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { if len(report.SuiteLabels) > 0 { r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) } + if len(report.SuiteSemVerConstraints) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteSemVerConstraints, ", 
"))) + } r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) if report.SuiteConfig.ParallelTotal > 1 { r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) @@ -87,6 +90,13 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { bannerWidth = len(labels) + 2 } } + if len(report.SuiteSemVerConstraints) > 0 { + semVerConstraints := strings.Join(report.SuiteSemVerConstraints, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", semVerConstraints)) + if len(semVerConstraints)+2 > bannerWidth { + bannerWidth = len(semVerConstraints) + 2 + } + } r.emitBlock(strings.Repeat("=", bannerWidth)) out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) @@ -371,13 +381,22 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim cursor := 0 for _, entry := range timeline { tl := entry.GetTimelineLocation() - if tl.Offset < len(gw) { - r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset])) - cursor = tl.Offset - } else if cursor < len(gw) { + + end := tl.Offset + if end > len(gw) { + end = len(gw) + } + if end < cursor { + end = cursor + } + if cursor < end && cursor <= len(gw) && end <= len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:end])) + cursor = end + } else if cursor < len(gw) && end == len(gw) { r.emit(r.fi(indent, "%s", gw[cursor:])) cursor = len(gw) } + switch x := entry.(type) { case types.Failure: if isVeryVerbose { @@ -394,7 +413,7 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim case types.ReportEntry: r.emitReportEntry(indent, x) case types.ProgressReport: - r.emitProgressReport(indent, false, x) + r.emitProgressReport(indent, false, isVeryVerbose, x) case types.SpecEvent: if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { r.emitSpecEvent(indent, x, isVeryVerbose) @@ -448,7 +467,7 @@ func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failur if !failure.ProgressReport.IsZero() { r.emitBlock("\n") - r.emitProgressReport(indent, false, failure.ProgressReport) + r.emitProgressReport(indent, false, false, failure.ProgressReport) } if failure.AdditionalFailure != nil && includeAdditionalFailure { @@ -464,11 +483,11 @@ func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) } shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) - r.emitProgressReport(1, shouldEmitGW, report) + r.emitProgressReport(1, shouldEmitGW, true, report) r.emitDelimiter(1) } -func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput, emitGroup bool, report types.ProgressReport) { if report.Message != "" { r.emitBlock(r.fi(indent, report.Message+"\n")) indent += 1 @@ -504,6 +523,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput indent -= 1 } + if r.conf.GithubOutput && emitGroup { + r.emitBlock(r.fi(indent, "::group::Progress Report")) + } + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { r.emit("\n") r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) @@ -550,6 +573,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) r.emitGoroutines(indent, 
otherGoroutines...) } + + if r.conf.GithubOutput && emitGroup { + r.emitBlock(r.fi(indent, "::endgroup::")) + } } func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { @@ -698,8 +725,8 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { } func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { - texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} - texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) + texts, locations, labels, semVerConstraints := []string{}, []types.CodeLocation{}, [][]string{}, [][]string{} + texts, locations, labels, semVerConstraints = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...), append(semVerConstraints, report.ContainerHierarchySemVerConstraints...) if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) @@ -707,6 +734,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo texts = append(texts, r.f(report.LeafNodeText)) } labels = append(labels, report.LeafNodeLabels) + semVerConstraints = append(semVerConstraints, report.LeafNodeSemVerConstraints) locations = append(locations, report.LeafNodeLocation) failureLocation := report.Failure.FailureNodeLocation @@ -720,6 +748,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...) locations = append([]types.CodeLocation{failureLocation}, locations...) labels = append([][]string{{}}, labels...) + semVerConstraints = append([][]string{{}}, semVerConstraints...) 
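For orientation, the gojson reporter introduced above writes one JSON object per line in the schema consumed by `go test -json`. A hedged sketch of the stream for a single passing spec; the package path, test name, timestamps, and durations are invented for illustration:

    {"Time":"2025-01-01T10:00:00Z","Action":"start","Package":"example.com/suite"}
    {"Time":"2025-01-01T10:00:01Z","Action":"run","Package":"example.com/suite","Test":"[It] Widget renders"}
    {"Time":"2025-01-01T10:00:02Z","Action":"pass","Package":"example.com/suite","Test":"[It] Widget renders","Elapsed":1.2}
    {"Time":"2025-01-01T10:00:02Z","Action":"pass","Package":"example.com/suite","Elapsed":2.4}

Note the final event omits Test: it is the suite-level result produced by WriteSuiteResult, mirroring the package-level pass/fail line that `go test -json` emits.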
highlightIndex = 0 case types.FailureNodeInContainer: i := report.Failure.FailureNodeContainerIndex @@ -747,6 +776,9 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo if len(labels[i]) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) } + if len(semVerConstraints[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(semVerConstraints[i], ", ")) + } out += "\n" out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) } @@ -770,6 +802,10 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo if len(flattenedLabels) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) } + flattenedSemVerConstraints := report.SemVerConstraints() + if len(flattenedSemVerConstraints) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedSemVerConstraints, ", ")) + } out += "\n" if usePreciseFailureLocation { out += r.f("{{gray}}%s{{/}}", failureLocation) diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go new file mode 100644 index 0000000000..d02fb7a1ae --- /dev/null +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go @@ -0,0 +1,61 @@ +package reporters + +import ( + "encoding/json" + "fmt" + "os" + "path" + + "github.com/onsi/ginkgo/v2/internal/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +// GenerateGoTestJSONReport produces a JSON-formatted report in the test2json format used by `go test -json` +func GenerateGoTestJSONReport(report types.Report, destination string) error { + // walk report and generate test2json-compatible objects + // JSON-encode the objects into filename + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return err + } + f, err := os.Create(destination) + if err != nil { + return err + } + defer f.Close() + enc := json.NewEncoder(f) + r := reporters.NewGoJSONReporter( + enc, + systemErrForUnstructuredReporters, + systemOutForUnstructuredReporters, + ) + return r.Write(report) } + +// MergeAndCleanupGoTestJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources +// It skips over reports that cannot be read but reports on them via the returned messages []string +func MergeAndCleanupGoTestJSONReports(sources []string, destination string) ([]string, error) { + messages := []string{} + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return messages, err + } + f, err := os.Create(destination) + if err != nil { + return messages, err + } + defer f.Close() + + for _, source := range sources { + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + _, err = f.Write(data) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not write to %s:\n%s", destination, err.Error())) + continue + } + os.Remove(source) + } + return messages, nil +} diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 562e0f62ba..828f893fb8 100644 --- a/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -36,6 +36,9 @@ type JunitReportConfig struct { // Enable OmitSpecLabels to prevent labels from appearing in the spec name OmitSpecLabels bool + // Enable
OmitSpecSemVerConstraints to prevent semantic version constraints from appearing in the spec name + OmitSpecSemVerConstraints bool + // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name OmitLeafNodeType bool @@ -169,9 +172,11 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, + {"SuiteSemVerConstraints", fmt.Sprintf("[%s]", strings.Join(report.SuiteSemVerConstraints, ","))}, {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)}, {"LabelFilter", report.SuiteConfig.LabelFilter}, + {"SemVerFilter", report.SuiteConfig.SemVerFilter}, {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, @@ -207,6 +212,10 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit owner = matches[1] } } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 && !config.OmitSpecSemVerConstraints { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } name = strings.TrimSpace(name) test := JUnitTestCase{ diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index e990ad82e1..55e1d1f4f7 100644 --- a/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -38,9 +38,13 @@ func GenerateTeamcityReport(report types.Report, dst string) error { name := report.SuiteDescription labels := report.SuiteLabels + semVerConstraints := report.SuiteSemVerConstraints if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) for _, spec := range report.SpecReports { name := fmt.Sprintf("[%s]", spec.LeafNodeType) @@ -51,6 +55,10 @@ func GenerateTeamcityReport(report types.Report, dst string) error { if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } name = tcEscape(name) fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/around_node.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/around_node.go new file mode 100644 index 0000000000..a069e0623d --- /dev/null +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/around_node.go @@ -0,0 +1,56 @@ +package types + +import ( + "context" +) + +type AroundNodeAllowedFuncs interface { + ~func(context.Context, func(context.Context)) | ~func(context.Context) context.Context | ~func() +} +type AroundNodeFunc func(ctx context.Context, body func(ctx context.Context)) + +func AroundNode[F AroundNodeAllowedFuncs](f F, cl CodeLocation) AroundNodeDecorator { + if f == nil { + panic("AroundNode cannot be called with a nil function.")
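To make the decorator's three accepted shapes concrete, here is a hedged usage sketch; the wrapper bodies, the context key type, and the use of types.NewCodeLocation are illustrative assumptions, not part of this diff:

    package example

    import (
    	"context"

    	"github.com/onsi/ginkgo/v2/types"
    )

    // traceKey is a hypothetical context key used only for this sketch.
    type traceKey struct{}

    func buildDecorators() types.AroundNodes {
    	// Shape 1: wrap the node body and run code before and after it.
    	wrap := types.AroundNode(func(ctx context.Context, body func(context.Context)) {
    		// set up ...
    		body(ctx)
    		// tear down ...
    	}, types.NewCodeLocation(0))

    	// Shape 2: derive a new context that the node body will receive.
    	derive := types.AroundNode(func(ctx context.Context) context.Context {
    		return context.WithValue(ctx, traceKey{}, "trace-123")
    	}, types.NewCodeLocation(0))

    	// Shape 3: side effects only; the body runs with the untouched context.
    	sideEffect := types.AroundNode(func() {}, types.NewCodeLocation(0))

    	return types.AroundNodes{}.Append(wrap, derive, sideEffect)
    }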
+ } + var aroundNodeFunc func(context.Context, func(context.Context)) + switch x := any(f).(type) { + case func(context.Context, func(context.Context)): + aroundNodeFunc = x + case func(context.Context) context.Context: + aroundNodeFunc = func(ctx context.Context, body func(context.Context)) { + ctx = x(ctx) + body(ctx) + } + case func(): + aroundNodeFunc = func(ctx context.Context, body func(context.Context)) { + x() + body(ctx) + } + } + + return AroundNodeDecorator{ + Body: aroundNodeFunc, + CodeLocation: cl, + } +} + +type AroundNodeDecorator struct { + Body AroundNodeFunc + CodeLocation CodeLocation +} + +type AroundNodes []AroundNodeDecorator + +func (an AroundNodes) Clone() AroundNodes { + out := make(AroundNodes, len(an)) + copy(out, an) + return out +} + +func (an AroundNodes) Append(other ...AroundNodeDecorator) AroundNodes { + out := make(AroundNodes, len(an)+len(other)) + copy(out, an) + copy(out[len(an):], other) + return out +} diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/config.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/config.go index 2e827efe30..f847036046 100644 --- a/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -24,6 +24,7 @@ type SuiteConfig struct { FocusFiles []string SkipFiles []string LabelFilter string + SemVerFilter string FailOnPending bool FailOnEmpty bool FailFast bool @@ -95,6 +96,7 @@ type ReporterConfig struct { ForceNewlines bool JSONReport string + GoJSONReport string JUnitReport string TeamcityReport string } @@ -111,7 +113,7 @@ func (rc ReporterConfig) Verbosity() VerbosityLevel { } func (rc ReporterConfig) WillGenerateReport() bool { - return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" + return rc.JSONReport != "" || rc.GoJSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" } func NewDefaultReporterConfig() ReporterConfig { @@ -308,6 +310,8 @@ var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"}, + {KeyPath: "S.SemVerFilter", Name: "sem-ver-filter", SectionKey: "filter", UsageArgument: "version", + Usage: "If set, ginkgo will only run specs with semantic version constraints that are satisfied by the provided version. e.g. '2.1.0'"}, {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", Usage: "If set, ginkgo will only run specs that match this regular expression. 
Can be specified multiple times, values are ORed."}, {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", @@ -356,6 +360,8 @@ var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, + {KeyPath: "R.GoJSONReport", Name: "gojson-report", UsageArgument: "filename.json", SectionKey: "output", + Usage: "If set, Ginkgo will generate a Go JSON-formatted test report at the specified location."}, {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure", Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."}, {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output", @@ -443,6 +449,13 @@ func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig Re } } + if suiteConfig.SemVerFilter != "" { + _, err := ParseSemVerFilter(suiteConfig.SemVerFilter) + if err != nil { + errors = append(errors, err) + } + } + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { case "", "dup", "swap", "none": default: @@ -573,6 +586,9 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, +} + +var GoBuildOFlags = GinkgoFlags{ {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", Usage: "output binary path (including name)."}, } @@ -673,7 +689,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( - GoBuildFlags, + GoBuildFlags.CopyAppend(GoBuildOFlags...), map[string]any{ "Go": &goFlagsConfig, }, @@ -763,6 +779,7 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { flags := GinkgoCLISharedFlags flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoBuildOFlags...) 
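Together with the runner changes earlier in this patch, the reporter and filter flags registered above can be exercised from the CLI; a hypothetical invocation (report path and version are illustrative):

    ginkgo --gojson-report=report.json --sem-ver-filter=2.1.0 ./...

The resulting report.json can then be fed to any tooling that already understands `go test -json` output.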
bindings := map[string]any{ "C": cliConfig, diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/errors.go index c2796b5490..59313238cf 100644 --- a/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -432,6 +432,24 @@ func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { } } +func (g ginkgoErrors) InvalidSemVerConstraint(semVerConstraint, errMsg string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid SemVerConstraint", + Message: fmt.Sprintf("'%s' is an invalid SemVerConstraint: %s", semVerConstraint, errMsg), + CodeLocation: cl, + DocLink: "spec-semantic-version-filtering", + } +} + +func (g ginkgoErrors) InvalidEmptySemVerConstraint(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty SemVerConstraint", + Message: "SemVerConstraint cannot be empty", + CodeLocation: cl, + DocLink: "spec-semantic-version-filtering", + } +} + /* Table errors */ func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { return GinkgoError{ diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go new file mode 100644 index 0000000000..3fc2ed144b --- /dev/null +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go @@ -0,0 +1,60 @@ +package types + +import ( + "fmt" + + "github.com/Masterminds/semver/v3" +) + +type SemVerFilter func([]string) bool + +func MustParseSemVerFilter(input string) SemVerFilter { + filter, err := ParseSemVerFilter(input) + if err != nil { + panic(err) + } + return filter +} + +func ParseSemVerFilter(filterVersion string) (SemVerFilter, error) { + if filterVersion == "" { + return func(_ []string) bool { return true }, nil + } + + targetVersion, err := semver.NewVersion(filterVersion) + if err != nil { + return nil, fmt.Errorf("invalid filter version: %w", err) + } + + return func(constraints []string) bool { + // unconstrained specs always run + if len(constraints) == 0 { + return true + } + + for _, constraintStr := range constraints { + constraint, err := semver.NewConstraint(constraintStr) + if err != nil { + return false + } + + if !constraint.Check(targetVersion) { + return false + } + } + + return true + }, nil +} + +func ValidateAndCleanupSemVerConstraint(semVerConstraint string, cl CodeLocation) (string, error) { + if len(semVerConstraint) == 0 { + return "", GinkgoErrors.InvalidEmptySemVerConstraint(cl) + } + _, err := semver.NewConstraint(semVerConstraint) + if err != nil { + return "", GinkgoErrors.InvalidSemVerConstraint(semVerConstraint, err.Error(), cl) + } + + return semVerConstraint, nil +} diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/types.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/types.go index ddcbec1ba8..9981a0dd68 100644 --- a/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "slices" "sort" "strings" "time" @@ -19,6 +20,57 @@ func init() { } } +// ConstructionNodeReport captures information about a Ginkgo spec. +type ConstructionNodeReport struct { + // ContainerHierarchyTexts is a slice containing the text strings of + // all Describe/Context/When containers in this spec's hierarchy. 
+ ContainerHierarchyTexts []string + + // ContainerHierarchyLocations is a slice containing the CodeLocations of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyLocations []CodeLocation + + // ContainerHierarchyLabels is a slice containing the labels of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchyLabels [][]string + + // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchySemVerConstraints [][]string + + // IsSerial captures whether any container has the Serial decorator + IsSerial bool + + // IsInOrderedContainer captures whether any container is an Ordered container + IsInOrderedContainer bool +} + +// FullText returns a concatenation of all the report.ContainerHierarchyTexts +func (report ConstructionNodeReport) FullText() string { + texts := []string{} + texts = append(texts, report.ContainerHierarchyTexts...) + texts = slices.DeleteFunc(texts, func(t string) bool { + return t == "" + }) + return strings.Join(texts, " ") +} + +// Labels returns a deduped set of all the spec's Labels. +func (report ConstructionNodeReport) Labels() []string { + out := []string{} + seen := map[string]bool{} + for _, labels := range report.ContainerHierarchyLabels { + for _, label := range labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + + return out +} + // Report captures information about a Ginkgo test run type Report struct { //SuitePath captures the absolute path to the test suite @@ -30,6 +82,9 @@ type Report struct { //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function SuiteLabels []string + //SuiteSemVerConstraints captures any semVerConstraints attached to the suite by the DSL's RunSpecs() function + SuiteSemVerConstraints []string + //SuiteSucceeded captures the success or failure status of the test run //If true, the test run is considered successful. //If false, the test run is considered unsuccessful @@ -129,13 +184,21 @@ type SpecReport struct { // all Describe/Context/When containers in this spec's hierarchy ContainerHierarchyLabels [][]string - // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text + // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchySemVerConstraints [][]string + + // LeafNodeType, LeafNodeLocation, LeafNodeLabels, LeafNodeSemVerConstraints and LeafNodeText capture the NodeType, CodeLocation, labels, semVerConstraints, and text // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be // one of the NodeTypesForSuiteLevelNodes node types) - LeafNodeType NodeType - LeafNodeLocation CodeLocation - LeafNodeLabels []string - LeafNodeText string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeSemVerConstraints []string + LeafNodeText string + + // Captures the Spec Priority + SpecPriority int // State captures whether the spec has passed, failed, etc.
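A small sketch of the filtering semantics that ParseSemVerFilter (added above) gives these constraint lists: a spec runs when it carries no constraints, or when the target version satisfies every one of them. The constraint strings here are illustrative:

    package main

    import (
    	"fmt"

    	"github.com/onsi/ginkgo/v2/types"
    )

    func main() {
    	filter := types.MustParseSemVerFilter("2.1.0")
    	fmt.Println(filter(nil))                       // true: unconstrained specs always run
    	fmt.Println(filter([]string{">=2.0.0", "<3"})) // true: 2.1.0 satisfies both constraints
    	fmt.Println(filter([]string{">=2.2.0"}))       // false: 2.1.0 fails the constraint
    }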
State SpecState @@ -198,48 +261,52 @@ type SpecReport struct { func (report SpecReport) MarshalJSON() ([]byte, error) { //All this to avoid emitting an empty Failure struct in the JSON out := struct { - ContainerHierarchyTexts []string - ContainerHierarchyLocations []CodeLocation - ContainerHierarchyLabels [][]string - LeafNodeType NodeType - LeafNodeLocation CodeLocation - LeafNodeLabels []string - LeafNodeText string - State SpecState - StartTime time.Time - EndTime time.Time - RunTime time.Duration - ParallelProcess int - Failure *Failure `json:",omitempty"` - NumAttempts int - MaxFlakeAttempts int - MaxMustPassRepeatedly int - CapturedGinkgoWriterOutput string `json:",omitempty"` - CapturedStdOutErr string `json:",omitempty"` - ReportEntries ReportEntries `json:",omitempty"` - ProgressReports []ProgressReport `json:",omitempty"` - AdditionalFailures []AdditionalFailure `json:",omitempty"` - SpecEvents SpecEvents `json:",omitempty"` + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + ContainerHierarchySemVerConstraints [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeSemVerConstraints []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + MaxFlakeAttempts int + MaxMustPassRepeatedly int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` }{ - ContainerHierarchyTexts: report.ContainerHierarchyTexts, - ContainerHierarchyLocations: report.ContainerHierarchyLocations, - ContainerHierarchyLabels: report.ContainerHierarchyLabels, - LeafNodeType: report.LeafNodeType, - LeafNodeLocation: report.LeafNodeLocation, - LeafNodeLabels: report.LeafNodeLabels, - LeafNodeText: report.LeafNodeText, - State: report.State, - StartTime: report.StartTime, - EndTime: report.EndTime, - RunTime: report.RunTime, - ParallelProcess: report.ParallelProcess, - Failure: nil, - ReportEntries: nil, - NumAttempts: report.NumAttempts, - MaxFlakeAttempts: report.MaxFlakeAttempts, - MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, - CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, - CapturedStdOutErr: report.CapturedStdOutErr, + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + ContainerHierarchySemVerConstraints: report.ContainerHierarchySemVerConstraints, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeSemVerConstraints: report.LeafNodeSemVerConstraints, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + MaxFlakeAttempts: report.MaxFlakeAttempts, + MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, } if !report.Failure.IsZero() { @@ -287,6 +354,9 @@ 
func (report SpecReport) FullText() string { if report.LeafNodeText != "" { texts = append(texts, report.LeafNodeText) } + texts = slices.DeleteFunc(texts, func(t string) bool { + return t == "" + }) return strings.Join(texts, " ") } @@ -312,6 +382,28 @@ func (report SpecReport) Labels() []string { return out } +// SemVerConstraints returns a deduped set of all the spec's SemVerConstraints. +func (report SpecReport) SemVerConstraints() []string { + out := []string{} + seen := map[string]bool{} + for _, semVerConstraints := range report.ContainerHierarchySemVerConstraints { + for _, semVerConstraint := range semVerConstraints { + if !seen[semVerConstraint] { + seen[semVerConstraint] = true + out = append(out, semVerConstraint) + } + } + } + for _, semVerConstraint := range report.LeafNodeSemVerConstraints { + if !seen[semVerConstraint] { + seen[semVerConstraint] = true + out = append(out, semVerConstraint) + } + } + + return out +} + // MatchesLabelFilter returns true if the spec satisfies the passed in label filter query func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { filter, err := ParseLabelFilter(query) @@ -321,6 +413,15 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { return filter(report.Labels()), nil } +// MatchesSemVerFilter returns true if the spec's semVerConstraints are satisfied by the passed-in filter version +func (report SpecReport) MatchesSemVerFilter(version string) (bool, error) { + filter, err := ParseSemVerFilter(version) + if err != nil { + return false, err + } + return filter(report.SemVerConstraints()), nil +} + // FileName() returns the name of the file containing the spec func (report SpecReport) FileName() string { return report.LeafNodeLocation.FileName diff --git a/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/version.go b/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/version.go index 158ac2fd89..2a50192871 100644 --- a/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/hack/tools/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.23.4" +const VERSION = "2.27.3" diff --git a/hack/tools/vendor/github.com/onsi/gomega/CHANGELOG.md b/hack/tools/vendor/github.com/onsi/gomega/CHANGELOG.md index 890d892228..b7d7309f3f 100644 --- a/hack/tools/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/hack/tools/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,32 @@ +## 1.38.2 + +- roll back to go 1.23.0 [c404969] + +## 1.38.1 + +### Fixes + +Numerous minor fixes and dependency bumps + +## 1.38.0 + +### Features +- gstruct handles extra unexported fields [4ee7ed0] + +### Fixes +- support [] in IgnoringTopFunction function signatures (#851) [36bbf72] + +### Maintenance +- Bump golang.org/x/net from 0.40.0 to 0.41.0 (#846) [529d408] +- Fix typo [acd1f55] +- Bump google.golang.org/protobuf from 1.36.5 to 1.36.6 (#835) [bae65a0] +- Bump nokogiri from 1.18.4 to 1.18.8 in /docs (#842) [8dda91f] +- Bump golang.org/x/net from 0.39.0 to 0.40.0 (#843) [212d812] +- Bump github.com/onsi/ginkgo/v2 from 2.23.3 to 2.23.4 (#839) [59bd7f9] +- Bump nokogiri from 1.18.1 to 1.18.4 in /docs (#834) [328c729] +- Bump uri from 1.0.2 to 1.0.3 in /docs (#826) [9a798a1] +- Bump golang.org/x/net from 0.37.0 to 0.39.0 (#841) [04a72c6] + ## 1.37.0 ### Features diff --git a/hack/tools/vendor/github.com/onsi/gomega/gomega_dsl.go b/hack/tools/vendor/github.com/onsi/gomega/gomega_dsl.go index a491a64be7..fdba34ee9d 100644 --- a/hack/tools/vendor/github.com/onsi/gomega/gomega_dsl.go +++
b/hack/tools/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.37.0" +const GOMEGA_VERSION = "1.38.2" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -178,7 +178,7 @@ func ensureDefaultGomegaIsConfigured() { // All subsequent arguments will be required to be nil/zero. // // This is convenient if you want to make an assertion on a method/function that returns -// a value and an error - a common patter in Go. +// a value and an error - a common pattern in Go. // // For example, given a function with signature: // diff --git a/hack/tools/vendor/github.com/onsi/gomega/internal/async_assertion.go b/hack/tools/vendor/github.com/onsi/gomega/internal/async_assertion.go index a3a646e4ad..4121505b62 100644 --- a/hack/tools/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/hack/tools/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -452,7 +452,7 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch } } else { var fgErr formattedGomegaError - if errors.As(actualErr, &fgErr) { + if errors.As(matcherErr, &fgErr) { message += fgErr.FormattedGomegaError() + "\n" } else { message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr) diff --git a/hack/tools/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/hack/tools/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 532fc37449..ce74eee4c7 100644 --- a/hack/tools/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/hack/tools/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -2,6 +2,7 @@ package matchers import ( "bytes" + "errors" "fmt" "github.com/google/go-cmp/cmp" @@ -32,7 +33,7 @@ func (matcher *BeComparableToMatcher) Match(actual any) (success bool, matchErr if err, ok := r.(error); ok { matchErr = err } else if errMsg, ok := r.(string); ok { - matchErr = fmt.Errorf(errMsg) + matchErr = errors.New(errMsg) } } }() diff --git a/hack/tools/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/hack/tools/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go index 95057c26cc..c3da9bd48b 100644 --- a/hack/tools/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go +++ b/hack/tools/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/onsi/gomega/format" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" ) type MatchYAMLMatcher struct { diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go index ad347113c0..2331b8b4f3 100644 --- a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const help: help, variableLabels: variableLabels.compile(), } - if !model.IsValidMetricName(model.LabelValue(fqName)) { + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. 
+ if !model.NameValidationScheme.IsValidMetricName(fqName) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index 8b016355ad..7bac0da33d 100644 --- a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -453,7 +453,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { } group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') { groups = append(groups, group) } return groups @@ -568,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) + _, err := fmt.Fprintf(buf, format, args...) return err } ws := func(s string) error { diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index f7f97ef926..d273b6640e 100644 --- a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -67,7 +67,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) } // Our current conversion moves to legacy naming, so use legacy validation. - valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) + valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go index c21911f292..5fe8d3b4d2 100644 --- a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error { } func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. + return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix) } diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 592eec3e24..76e59f1288 100644 --- a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -186,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error { case pb.Counter != nil: pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] case pb.Histogram != nil: + h := pb.Histogram for _, e := range m.exemplars { - // pb.Histogram.Bucket are sorted by UpperBound. 
- i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { - return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 || + len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) && + e.GetTimestamp() != nil { + h.Exemplars = append(h.Exemplars, e) + if len(h.Bucket) == 0 { + // Don't proceed to classic buckets if there are none. + continue + } + } + // h.Bucket are sorted by UpperBound. + i := sort.Search(len(h.Bucket), func(i int) bool { + return h.Bucket[i].GetUpperBound() >= e.GetValue() }) - if i < len(pb.Histogram.Bucket) { - pb.Histogram.Bucket[i].Exemplar = e + if i < len(h.Bucket) { + h.Bucket[i].Exemplar = e } else { // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. b := &dto.Bucket{ - CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), + CumulativeCount: proto.Uint64(h.GetSampleCount()), UpperBound: proto.Float64(math.Inf(1)), Exemplar: e, } - pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + h.Bucket = append(h.Bucket, b) } } default: @@ -227,6 +237,7 @@ type Exemplar struct { // Only last applicable exemplar is injected from the list. // For example for Counter it means last exemplar is injected. // For Histogram, it means last applicable exemplar for each bucket is injected. +// For a Native Histogram, all valid exemplars are injected. // // NewMetricWithExemplars works best with MustNewConstMetric and // MustNewConstHistogram, see example. diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go index 0a61b98461..b32c95fa3f 100644 --- a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -25,9 +25,9 @@ import ( "golang.org/x/sys/unix" ) -// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// errNotImplemented is returned by stub functions that replace cgo functions, when cgo // isn't available. -var notImplementedErr = errors.New("not implemented") +var errNotImplemented = errors.New("not implemented") type memoryInfo struct { vsize uint64 // Virtual memory size in bytes @@ -101,7 +101,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if memInfo, err := getMemory(); err == nil { ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) - } else if !errors.Is(err, notImplementedErr) { + } else if !errors.Is(err, errNotImplemented) { // Don't report an error when support is not compiled in. 
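The exemplar change above is easiest to see from the caller's side: for a classic histogram the exemplar still lands on the first bucket whose upper bound admits its value, while a native histogram (one with spans or a zero threshold) now keeps every exemplar that carries a timestamp. A hedged sketch using the const-metric helpers; the metric name, buckets, and exemplar values are illustrative:

    package example

    import (
    	"log"

    	"github.com/prometheus/client_golang/prometheus"
    )

    func constHistogramWithExemplar() prometheus.Metric {
    	desc := prometheus.NewDesc("http_request_duration_seconds", "Request latency.", nil, nil)
    	base := prometheus.MustNewConstHistogram(desc, 50, 12.5, map[float64]uint64{0.1: 20, 1: 45})
    	// 0.07 <= 0.1, so this exemplar attaches to the 0.1 bucket.
    	m, err := prometheus.NewMetricWithExemplars(base, prometheus.Exemplar{
    		Value:  0.07,
    		Labels: prometheus.Labels{"trace_id": "abc123"},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	return m
    }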
c.reportError(ch, c.rss, err) c.reportError(ch, c.vsize, err) diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go index 8ddb0995d6..378865129b 100644 --- a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go @@ -16,7 +16,7 @@ package prometheus func getMemory() (*memoryInfo, error) { - return nil, notImplementedErr + return nil, errNotImplemented } // describe returns all descriptions of the collector for Darwin. diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go index 9f4b130bef..8074f70f5d 100644 --- a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go @@ -66,11 +66,11 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if netstat, err := p.Netstat(); err == nil { var inOctets, outOctets float64 - if netstat.IpExt.InOctets != nil { - inOctets = *netstat.IpExt.InOctets + if netstat.InOctets != nil { + inOctets = *netstat.InOctets } - if netstat.IpExt.OutOctets != nil { - outOctets = *netstat.IpExt.OutOctets + if netstat.OutOctets != nil { + outOctets = *netstat.OutOctets } ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index 356edb7868..9332b0249a 100644 --- a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -392,7 +392,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool { func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { labels := prometheus.Labels{} - if !(code || method) { + if !code && !method { return labels } diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 1258508e4f..80a4d7c355 100644 --- a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -262,7 +262,7 @@ func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNa // convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of // dto.MetricFamily. 
func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { - var tp expfmt.TextParser + tp := expfmt.NewTextParser(model.UTF8Validation) notNormalized, err := tp.TextToMetricFamilies(reader) if err != nil { return nil, fmt.Errorf("converting reader to metric families failed: %w", err) diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 2c808eece0..487b466563 100644 --- a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { return false } - return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) + return m.deleteByHashWithLabelValues(h, lvs, m.curry) } // Delete deletes the metric where the variable labels are the same as those @@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool { return false } - return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) + return m.deleteByHashWithLabels(h, labels, m.curry) } // DeletePartialMatch deletes all metrics where the variable labels contain all of those @@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int { labels, closer := constrainLabels(m.desc, labels) defer closer() - return m.metricMap.deleteByLabels(labels, m.curry) + return m.deleteByLabels(labels, m.curry) } // Without explicit forwarding of Describe, Collect, Reset, those methods won't @@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil + return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil } // GetMetricWith returns the Metric for the given Labels map (the label names @@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil + return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil } func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 25da157f15..2ed1285068 100644 --- a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer { // metric names that are standardized across applications, as that would break // horizontal monitoring, for example the metrics provided by the Go collector // (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with “go_” or “process_”, +// fact, those metrics are already prefixed with "go_" or "process_", // respectively.) // // Conflicts between Collectors registered through the original Registerer with @@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { } } +// WrapCollectorWith returns a Collector wrapping the provided Collector. The +// wrapped Collector will add the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. 
+//
+// WrapCollectorWith can be useful to work with multiple instances of a third
+// party library that does not expose enough flexibility on the lifecycle of its
+// registered metrics.
+// For example, let's say you have a foo.New(reg Registerer) constructor that
+// registers metrics but never unregisters them, and you want to create multiple
+// instances of foo.Foo with different labels.
+// The way to achieve that is to create a new Registry, pass it to foo.New,
+// then use WrapCollectorWith to wrap that Registry with the desired labels and
+// register that as a collector in your main Registry.
+// Then you can un-register the wrapped collector, effectively un-registering the
+// metrics registered by foo.New.
+func WrapCollectorWith(labels Labels, c Collector) Collector {
+	return &wrappingCollector{
+		wrappedCollector: c,
+		labels:           labels,
+	}
+}
+
+// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
+//
+// See the documentation of WrapCollectorWith for more details on the use case.
+func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
+	return &wrappingCollector{
+		wrappedCollector: c,
+		prefix:           prefix,
+	}
+}
+
 type wrappingRegisterer struct {
 	wrappedRegisterer Registerer
 	prefix            string
diff --git a/hack/tools/vendor/github.com/prometheus/common/expfmt/decode.go b/hack/tools/vendor/github.com/prometheus/common/expfmt/decode.go
index 1448439b7f..7b762370e2 100644
--- a/hack/tools/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/hack/tools/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -70,19 +70,34 @@ func ResponseFormat(h http.Header) Format {
 	return FmtUnknown
 }
 
-// NewDecoder returns a new decoder based on the given input format.
-// If the input format does not imply otherwise, a text format decoder is returned.
+// NewDecoder returns a new decoder based on the given input format. Metric
+// names are validated based on the provided Format -- if the format requires
+// escaping, traditional Prometheus validity checking is used. Otherwise, names
+// are checked for UTF-8 validity. Supported formats include delimited protobuf
+// and Prometheus text format. For historical reasons, this decoder falls back
+// to classic text decoding for any other format. This decoder does not fully
+// support OpenMetrics although it may often succeed due to the similarities
+// between the formats. This decoder may not support the latest features of
+// Prometheus text format and is not intended for high-performance applications.
+// See: https://github.com/prometheus/common/issues/812
 func NewDecoder(r io.Reader, format Format) Decoder {
+	scheme := model.LegacyValidation
+	if format.ToEscapingScheme() == model.NoEscaping {
+		scheme = model.UTF8Validation
+	}
 	switch format.FormatType() {
 	case TypeProtoDelim:
-		return &protoDecoder{r: bufio.NewReader(r)}
+		return &protoDecoder{r: bufio.NewReader(r), s: scheme}
+	case TypeProtoText, TypeProtoCompact:
+		return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)}
 	}
-	return &textDecoder{r: r}
+	return &textDecoder{r: r, s: scheme}
 }
 
 // protoDecoder implements the Decoder interface for protocol buffers.
 type protoDecoder struct {
 	r protodelim.Reader
+	s model.ValidationScheme
 }
 
 // Decode implements the Decoder interface.
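As an aside on the wrap.go hunk above: a minimal usage sketch for the new WrapCollectorWith API. This is illustrative and not part of the vendored diff; the metric and label names are hypothetical, and a whole sub-Registry could stand in for the Counter exactly as the doc comment describes.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Hypothetical per-instance metric; a Counter is itself a Collector.
	reqs := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "foo_requests_total",
		Help: "Requests handled by one foo instance.",
	})
	reqs.Inc()

	// The wrapper adds instance="a" as a const label to everything the
	// wrapped Collector produces.
	wrapped := prometheus.WrapCollectorWith(prometheus.Labels{"instance": "a"}, reqs)

	reg := prometheus.NewRegistry()
	reg.MustRegister(wrapped)

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), mf.GetMetric()[0].GetLabel())
	}

	// Unregistering the wrapper removes the wrapped metrics in one step.
	fmt.Println("unregistered:", reg.Unregister(wrapped))
}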
@@ -93,7 +108,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + if !d.s.IsValidMetricName(v.GetName()) { return fmt.Errorf("invalid metric name %q", v.GetName()) } for _, m := range v.GetMetric() { @@ -107,7 +122,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if !model.LabelValue(l.GetValue()).IsValid() { return fmt.Errorf("invalid label value %q", l.GetValue()) } - if !model.LabelName(l.GetName()).IsValid() { + if !d.s.IsValidLabelName(l.GetName()) { return fmt.Errorf("invalid label name %q", l.GetName()) } } @@ -115,10 +130,20 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { return nil } +// errDecoder is an error-state decoder that always returns the same error. +type errDecoder struct { + err error +} + +func (d *errDecoder) Decode(*dto.MetricFamily) error { + return d.err +} + // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader fams map[string]*dto.MetricFamily + s model.ValidationScheme err error } @@ -126,7 +151,7 @@ type textDecoder struct { func (d *textDecoder) Decode(v *dto.MetricFamily) error { if d.err == nil { // Read all metrics in one shot. - var p TextParser + p := NewTextParser(d.s) d.fams, d.err = p.TextToMetricFamilies(d.r) // If we don't get an error, store io.EOF for the end. if d.err == nil { diff --git a/hack/tools/vendor/github.com/prometheus/common/expfmt/encode.go b/hack/tools/vendor/github.com/prometheus/common/expfmt/encode.go index d7f3d76f55..73c24dfbc9 100644 --- a/hack/tools/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/hack/tools/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,14 +18,12 @@ import ( "io" "net/http" + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" "github.com/prometheus/common/model" - - "github.com/munnerz/goautoneg" - - dto "github.com/prometheus/client_model/go" ) // Encoder types encode metric families into an underlying wire protocol. @@ -61,7 +59,7 @@ func (ec encoderCloser) Close() error { // appropriate accepted type is found, FmtText is returned (which is the // Prometheus text format). This function will never negotiate FmtOpenMetrics, // as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. +// FmtOpenMetrics, use NegotiateIncludingOpenMetrics. 
func Negotiate(h http.Header) Format { escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { @@ -153,7 +151,7 @@ func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := protodelim.MarshalTo(w, v) + _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, diff --git a/hack/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go b/hack/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go index b26886560d..c34c7de432 100644 --- a/hack/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/hack/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -36,9 +36,11 @@ const ( ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + //nolint:revive // Allow for underscores. OpenMetricsVersion_0_0_1 = "0.0.1" + //nolint:revive // Allow for underscores. OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. Do not do direct @@ -54,8 +56,10 @@ const ( // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) @@ -188,8 +192,8 @@ func (f Format) FormatType() FormatType { // Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid // "escaping" term exists, that will be used. Otherwise, the global default will // be returned. -func (format Format) ToEscapingScheme() model.EscapingScheme { - for _, p := range strings.Split(string(format), ";") { +func (f Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(f), ";") { toks := strings.Split(p, "=") if len(toks) != 2 { continue diff --git a/hack/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go b/hack/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go index dfac962a4e..0290f6abc4 100644 --- a/hack/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/hack/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -17,7 +17,11 @@ package expfmt -import "bytes" +import ( + "bytes" + + "github.com/prometheus/common/model" +) // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // @@ -26,9 +30,8 @@ import "bytes" // // Further input samples should go in the folder fuzz/corpus. 
func Fuzz(in []byte) int { - parser := TextParser{} + parser := NewTextParser(model.UTF8Validation) _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - if err != nil { return 0 } diff --git a/hack/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/hack/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index a21ed4ec1f..8dbf6d04ed 100644 --- a/hack/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/hack/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,10 @@ import ( "strconv" "strings" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/types/known/timestamppb" "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" ) type encoderOption struct { @@ -249,7 +248,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E // Finally the samples, one line for each. if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { - compliantName = compliantName + "_total" + compliantName += "_total" } for _, metric := range in.Metric { switch metricType { @@ -477,7 +476,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -641,11 +640,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - err = (*e).Timestamp.CheckValid() + err = e.Timestamp.CheckValid() if err != nil { return written, err } - ts := (*e).Timestamp.AsTime() + ts := e.Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/hack/tools/vendor/github.com/prometheus/common/expfmt/text_create.go b/hack/tools/vendor/github.com/prometheus/common/expfmt/text_create.go index 4b86434b33..c4e9c1bbc3 100644 --- a/hack/tools/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/hack/tools/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -22,9 +22,9 @@ import ( "strings" "sync" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" ) // enhancedWriter has all the enhanced write functions needed here. bufio.Writer @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(name) { + if model.LegacyValidation.IsValidMetricName(name) { return w.WriteString(name) } var written int diff --git a/hack/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go b/hack/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go index b4607fe4d2..8f2edde324 100644 --- a/hack/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/hack/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -78,6 +78,14 @@ type TextParser struct { // These indicate if the metric name from the current line being parsed is inside // braces and if that metric name was found respectively. currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool + // scheme sets the desired ValidationScheme for names. Defaults to the invalid + // UnsetValidation. + scheme model.ValidationScheme +} + +// NewTextParser returns a new TextParser with the provided nameValidationScheme. +func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser { + return TextParser{scheme: nameValidationScheme} } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -126,6 +134,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF func (p *TextParser) reset(in io.Reader) { p.metricFamiliesByName = map[string]*dto.MetricFamily{} + p.currentLabelPairs = nil if p.buf == nil { p.buf = bufio.NewReader(in) } else { @@ -216,6 +225,9 @@ func (p *TextParser) startComment() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -244,6 +256,9 @@ func (p *TextParser) readingMetricName() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } // Now is the time to fix the type if it hasn't happened yet. if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() @@ -311,6 +326,9 @@ func (p *TextParser) startLabelName() stateFn { switch p.currentByte { case ',': p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -319,6 +337,10 @@ func (p *TextParser) startLabelName() stateFn { return p.startLabelName case '}': p.setOrCreateCurrentMF() + if p.err != nil { + p.currentLabelPairs = nil + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -341,25 +363,30 @@ func (p *TextParser) startLabelName() stateFn { p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + p.currentLabelPairs = nil + return nil + } + if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) { + p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName())) + p.currentLabelPairs = nil return nil } // Special summary/histogram treatment. Don't add 'quantile' and 'le' // labels to 'real' labels. 
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && + (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) for _, l := range p.currentLabelPairs { lName := l.GetName() - if _, exists := labels[lName]; !exists { - labels[lName] = struct{}{} - } else { + if _, exists := labels[lName]; exists { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) p.currentLabelPairs = nil return nil } + labels[lName] = struct{}{} } return p.startLabelValue } @@ -440,7 +467,8 @@ func (p *TextParser) readingValue() stateFn { // When we are here, we have read all the labels, so for the // special case of a summary/histogram, we can finally find out // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { + switch p.currentMF.GetType() { + case dto.MetricType_SUMMARY: signature := model.LabelsToSignature(p.currentLabels) if summary := p.summaries[signature]; summary != nil { p.currentMetric = summary @@ -448,7 +476,7 @@ func (p *TextParser) readingValue() stateFn { p.summaries[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + case dto.MetricType_HISTOGRAM: signature := model.LabelsToSignature(p.currentLabels) if histogram := p.histograms[signature]; histogram != nil { p.currentMetric = histogram @@ -456,7 +484,7 @@ func (p *TextParser) readingValue() stateFn { p.histograms[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else { + default: p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } if p.readTokenUntilWhitespace(); p.err != nil { @@ -805,6 +833,10 @@ func (p *TextParser) setOrCreateCurrentMF() { p.currentIsHistogramCount = false p.currentIsHistogramSum = false name := p.currentToken.String() + if !p.scheme.IsValidMetricName(name) { + p.parseError(fmt.Sprintf("invalid metric name %q", name)) + return + } if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { return } diff --git a/hack/tools/vendor/github.com/prometheus/common/model/alert.go b/hack/tools/vendor/github.com/prometheus/common/model/alert.go index bd3a39e3e1..460f554f29 100644 --- a/hack/tools/vendor/github.com/prometheus/common/model/alert.go +++ b/hack/tools/vendor/github.com/prometheus/common/model/alert.go @@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool { return a.ResolvedAt(time.Now()) } -// ResolvedAt returns true off the activity interval ended before +// ResolvedAt returns true iff the activity interval ended before // the given timestamp. 
func (a *Alert) ResolvedAt(ts time.Time) bool { if a.EndsAt.IsZero() { diff --git a/hack/tools/vendor/github.com/prometheus/common/model/labels.go b/hack/tools/vendor/github.com/prometheus/common/model/labels.go index 73b7aa3e60..dfeb34be5f 100644 --- a/hack/tools/vendor/github.com/prometheus/common/model/labels.go +++ b/hack/tools/vendor/github.com/prometheus/common/model/labels.go @@ -22,7 +22,7 @@ import ( ) const ( - // AlertNameLabel is the name of the label containing the an alert's name. + // AlertNameLabel is the name of the label containing the alert's name. AlertNameLabel = "alertname" // ExportedLabelPrefix is the prefix to prepend to the label names present in @@ -32,6 +32,12 @@ const ( // MetricNameLabel is the label name indicating the metric name of a // timeseries. MetricNameLabel = "__name__" + // MetricTypeLabel is the label name indicating the metric type of + // timeseries as per the PROM-39 proposal. + MetricTypeLabel = "__type__" + // MetricUnitLabel is the label name indicating the metric unit of + // timeseries as per the PROM-39 proposal. + MetricUnitLabel = "__unit__" // SchemeLabel is the name of the label that holds the scheme on which to // scrape a target. @@ -100,33 +106,21 @@ type LabelName string // IsValid returns true iff the name matches the pattern of LabelNameRE when // NameValidationScheme is set to LegacyValidation, or valid UTF-8 if // NameValidationScheme is set to UTF8Validation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [ValidationScheme.IsValidLabelName] instead. func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - switch NameValidationScheme { - case LegacyValidation: - return ln.IsValidLegacy() - case UTF8Validation: - return utf8.ValidString(string(ln)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidLabelName(string(ln)) } // IsValidLegacy returns true iff name matches the pattern of LabelNameRE for // legacy names. It does not use LabelNameRE for the check but a much faster // hardcoded implementation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [LegacyValidation.IsValidLabelName] instead. func (ln LabelName) IsValidLegacy() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true + return LegacyValidation.IsValidLabelName(string(ln)) } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/hack/tools/vendor/github.com/prometheus/common/model/labelset.go b/hack/tools/vendor/github.com/prometheus/common/model/labelset.go index d0ad88da33..9de47b2568 100644 --- a/hack/tools/vendor/github.com/prometheus/common/model/labelset.go +++ b/hack/tools/vendor/github.com/prometheus/common/model/labelset.go @@ -114,10 +114,10 @@ func (ls LabelSet) Clone() LabelSet { } // Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) +func (ls LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(ls)) - for k, v := range l { + for k, v := range ls { result[k] = v } @@ -140,7 +140,7 @@ func (ls LabelSet) FastFingerprint() Fingerprint { } // UnmarshalJSON implements the json.Unmarshaler interface. 
-func (l *LabelSet) UnmarshalJSON(b []byte) error { +func (ls *LabelSet) UnmarshalJSON(b []byte) error { var m map[LabelName]LabelValue if err := json.Unmarshal(b, &m); err != nil { return err @@ -153,6 +153,6 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error { return fmt.Errorf("%q is not a valid label name", ln) } } - *l = LabelSet(m) + *ls = LabelSet(m) return nil } diff --git a/hack/tools/vendor/github.com/prometheus/common/model/metric.go b/hack/tools/vendor/github.com/prometheus/common/model/metric.go index 5766107cf9..3feebf328a 100644 --- a/hack/tools/vendor/github.com/prometheus/common/model/metric.go +++ b/hack/tools/vendor/github.com/prometheus/common/model/metric.go @@ -14,6 +14,7 @@ package model import ( + "encoding/json" "errors" "fmt" "regexp" @@ -23,17 +24,30 @@ import ( "unicode/utf8" dto "github.com/prometheus/client_model/go" + "go.yaml.in/yaml/v2" "google.golang.org/protobuf/proto" ) var ( - // NameValidationScheme determines the method of name validation to be used by - // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 - // mode in isolation from other components that don't support UTF-8 may result - // in bugs or other undefined behavior. This value can be set to - // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To - // avoid need for locking, this value should be set once, ideally in an - // init(), before multiple goroutines are started. + // NameValidationScheme determines the global default method of the name + // validation to be used by all calls to IsValidMetricName() and LabelName + // IsValid(). + // + // Deprecated: This variable should not be used and might be removed in the + // far future. If you wish to stick to the legacy name validation use + // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods + // instead. This variable is here as an escape hatch for emergency cases, + // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., + // to delay UTF-8 migrations in time or aid in debugging unforeseen results of + // the change. In such a case, a temporary assignment to `LegacyValidation` + // value in the `init()` function in your main.go or so, could be considered. + // + // Historically we opted for a global variable for feature gating different + // validation schemes in operations that were not otherwise easily adjustable + // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate + // Labels structure or package might have been a better choice. Given the + // change was made and many upgraded the common already, we live this as-is + // with this warning and learning for the future. NameValidationScheme = UTF8Validation // NameEscapingScheme defines the default way that names will be escaped when @@ -50,16 +64,151 @@ var ( type ValidationScheme int const ( - // LegacyValidation is a setting that requirets that metric and label names + // UnsetValidation represents an undefined ValidationScheme. + // Should not be used in practice. + UnsetValidation ValidationScheme = iota + + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. - LegacyValidation ValidationScheme = iota + LegacyValidation // UTF8Validation only requires that metric and label names be valid UTF-8 // strings. 
UTF8Validation ) +var _ interface { + yaml.Marshaler + yaml.Unmarshaler + json.Marshaler + json.Unmarshaler + fmt.Stringer +} = new(ValidationScheme) + +// String returns the string representation of s. +func (s ValidationScheme) String() string { + switch s { + case UnsetValidation: + return "unset" + case LegacyValidation: + return "legacy" + case UTF8Validation: + return "utf8" + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (s ValidationScheme) MarshalYAML() (any, error) { + switch s { + case UnsetValidation: + return "", nil + case LegacyValidation, UTF8Validation: + return s.String(), nil + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error { + var scheme string + if err := unmarshal(&scheme); err != nil { + return err + } + return s.Set(scheme) +} + +// MarshalJSON implements the json.Marshaler interface. +func (s ValidationScheme) MarshalJSON() ([]byte, error) { + switch s { + case UnsetValidation: + return json.Marshal("") + case UTF8Validation, LegacyValidation: + return json.Marshal(s.String()) + default: + return nil, fmt.Errorf("unhandled ValidationScheme: %d", s) + } +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error { + var repr string + if err := json.Unmarshal(bytes, &repr); err != nil { + return err + } + return s.Set(repr) +} + +// Set implements the pflag.Value interface. +func (s *ValidationScheme) Set(text string) error { + switch text { + case "": + // Don't change the value. + case LegacyValidation.String(): + *s = LegacyValidation + case UTF8Validation.String(): + *s = UTF8Validation + default: + return fmt.Errorf("unrecognized ValidationScheme: %q", text) + } + return nil +} + +// IsValidMetricName returns whether metricName is valid according to s. +func (s ValidationScheme) IsValidMetricName(metricName string) bool { + switch s { + case LegacyValidation: + if len(metricName) == 0 { + return false + } + for i, b := range metricName { + if !isValidLegacyRune(b, i) { + return false + } + } + return true + case UTF8Validation: + if len(metricName) == 0 { + return false + } + return utf8.ValidString(metricName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String())) + } +} + +// IsValidLabelName returns whether labelName is valid according to s. +func (s ValidationScheme) IsValidLabelName(labelName string) bool { + switch s { + case LegacyValidation: + if len(labelName) == 0 { + return false + } + for i, b := range labelName { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck + return false + } + } + return true + case UTF8Validation: + if len(labelName) == 0 { + return false + } + return utf8.ValidString(labelName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s)) + } +} + +// Type implements the pflag.Value interface. +func (ValidationScheme) Type() string { + return "validationScheme" +} + type EscapingScheme int const ( @@ -89,7 +238,7 @@ const ( // Accept header, the default NameEscapingScheme will be used. EscapingKey = "escaping" - // Possible values for Escaping Key: + // Possible values for Escaping Key. 
AllowUTF8 = "allow-utf-8" // No escaping required. EscapeUnderscores = "underscores" EscapeDots = "dots" @@ -163,34 +312,22 @@ func (m Metric) FastFingerprint() Fingerprint { // IsValidMetricName returns true iff name matches the pattern of MetricNameRE // for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is // selected. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [ValidationScheme.IsValidMetricName] instead. func IsValidMetricName(n LabelValue) bool { - switch NameValidationScheme { - case LegacyValidation: - return IsValidLegacyMetricName(string(n)) - case UTF8Validation: - if len(n) == 0 { - return false - } - return utf8.ValidString(string(n)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidMetricName(string(n)) } // IsValidLegacyMetricName is similar to IsValidMetricName but always uses the // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [LegacyValidation.IsValidMetricName] instead. func IsValidLegacyMetricName(n string) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !isValidLegacyRune(b, i) { - return false - } - } - return true + return LegacyValidation.IsValidMetricName(n) } // EscapeMetricFamily escapes the given metric names and labels with the given @@ -298,13 +435,14 @@ func EscapeName(name string, scheme EscapingScheme) string { case DotsEscaping: // Do not early return for legacy valid names, we still escape underscores. for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if b == '.' { + case b == '.': escaped.WriteString("_dot_") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else { + default: escaped.WriteString("__") } } @@ -315,13 +453,14 @@ func EscapeName(name string, scheme EscapingScheme) string { } escaped.WriteString("U__") for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else if !utf8.ValidRune(b) { + case !utf8.ValidRune(b): escaped.WriteString("_FFFD_") - } else { + default: escaped.WriteRune('_') escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') @@ -333,7 +472,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } } -// lower function taken from strconv.atoi +// lower function taken from strconv.atoi. 
func lower(c byte) byte { return c | ('x' - 'X') } @@ -397,11 +536,12 @@ func UnescapeName(name string, scheme EscapingScheme) string { } r := lower(escapedName[i]) utf8Val *= 16 - if r >= '0' && r <= '9' { + switch { + case r >= '0' && r <= '9': utf8Val += uint(r) - '0' - } else if r >= 'a' && r <= 'f' { + case r >= 'a' && r <= 'f': utf8Val += uint(r) - 'a' + 10 - } else { + default: return name } i++ diff --git a/hack/tools/vendor/github.com/prometheus/common/model/time.go b/hack/tools/vendor/github.com/prometheus/common/model/time.go index 5727452c1e..1730b0fdc1 100644 --- a/hack/tools/vendor/github.com/prometheus/common/model/time.go +++ b/hack/tools/vendor/github.com/prometheus/common/model/time.go @@ -126,14 +126,14 @@ func (t *Time) UnmarshalJSON(b []byte) error { p := strings.Split(string(b), ".") switch len(p) { case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } *t = Time(v * second) case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } @@ -143,7 +143,7 @@ func (t *Time) UnmarshalJSON(b []byte) error { if prec < 0 { p[1] = p[1][:dotPrecision] } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) + p[1] += strings.Repeat("0", prec) } va, err := strconv.ParseInt(p[1], 10, 32) @@ -170,15 +170,15 @@ func (t *Time) UnmarshalJSON(b []byte) error { // This type should not propagate beyond the scope of input/output processing. type Duration time.Duration -// Set implements pflag/flag.Value +// Set implements pflag/flag.Value. func (d *Duration) Set(s string) error { var err error *d, err = ParseDuration(s) return err } -// Type implements pflag.Value -func (d *Duration) Type() string { +// Type implements pflag.Value. +func (*Duration) Type() string { return "duration" } @@ -201,6 +201,7 @@ var unitMap = map[string]struct { // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. +// Negative durations are not supported. func ParseDuration(s string) (Duration, error) { switch s { case "0": @@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) { return 0, errors.New("duration out of range") } } + return Duration(dur), nil } +// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations. +func ParseDurationAllowNegative(s string) (Duration, error) { + if s == "" || s[0] != '-' { + return ParseDuration(s) + } + + d, err := ParseDuration(s[1:]) + + return -d, err +} + func (d Duration) String() string { var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" + ms = int64(time.Duration(d) / time.Millisecond) + r = "" + sign = "" ) + if ms == 0 { return "0s" } + if ms < 0 { + sign, ms = "-", -ms + } + f := func(unit string, mult int64, exact bool) { if exact && ms%mult != 0 { return @@ -286,7 +305,7 @@ func (d Duration) String() string { f("s", 1000, false) f("ms", 1, false) - return r + return sign + r } // MarshalJSON implements the json.Marshaler interface. 
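The metric.go and time.go changes above combine into a compact new API surface. A minimal sketch, assuming the prometheus/common version vendored here; the values in comments are what these calls should print.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Name validation is now a method on the scheme rather than a package
	// function consulting the global NameValidationScheme.
	fmt.Println(model.LegacyValidation.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.LegacyValidation.IsValidMetricName("http.requests"))       // false: '.' is not a legacy rune
	fmt.Println(model.UTF8Validation.IsValidMetricName("http.requests"))         // true

	// ValidationScheme implements pflag.Value (Set/String/Type), so it can
	// back a command-line flag directly.
	var scheme model.ValidationScheme
	if err := scheme.Set("legacy"); err != nil {
		panic(err)
	}
	fmt.Println(scheme, scheme.IsValidLabelName("0label")) // legacy false: a digit cannot lead

	// ParseDurationAllowNegative accepts a leading '-', and Duration.String
	// carries the sign through.
	d, err := model.ParseDurationAllowNegative("-1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // -1h30m
}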
diff --git a/hack/tools/vendor/github.com/prometheus/common/model/value.go b/hack/tools/vendor/github.com/prometheus/common/model/value.go index 8050637d82..a9995a37ee 100644 --- a/hack/tools/vendor/github.com/prometheus/common/model/value.go +++ b/hack/tools/vendor/github.com/prometheus/common/model/value.go @@ -191,7 +191,8 @@ func (ss SampleStream) String() string { } func (ss SampleStream) MarshalJSON() ([]byte, error) { - if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + switch { + case len(ss.Histograms) > 0 && len(ss.Values) > 0: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -202,7 +203,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else if len(ss.Histograms) > 0 { + case len(ss.Histograms) > 0: v := struct { Metric Metric `json:"metric"` Histograms []SampleHistogramPair `json:"histograms"` @@ -211,7 +212,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else { + default: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -258,7 +259,7 @@ func (s Scalar) String() string { // MarshalJSON implements json.Marshaler. func (s Scalar) MarshalJSON() ([]byte, error) { v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) + return json.Marshal([...]interface{}{s.Timestamp, v}) } // UnmarshalJSON implements json.Unmarshaler. @@ -349,9 +350,9 @@ func (m Matrix) Len() int { return len(m) } func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) +func (m Matrix) String() string { + matCp := make(Matrix, len(m)) + copy(matCp, m) sort.Sort(matCp) strs := make([]string, len(matCp)) diff --git a/hack/tools/vendor/github.com/prometheus/common/model/value_histogram.go b/hack/tools/vendor/github.com/prometheus/common/model/value_histogram.go index 895e6a3e83..91ce5b7a45 100644 --- a/hack/tools/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/hack/tools/vendor/github.com/prometheus/common/model/value_histogram.go @@ -86,22 +86,22 @@ func (s *HistogramBucket) Equal(o *HistogramBucket) bool { return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) } -func (b HistogramBucket) String() string { +func (s HistogramBucket) String() string { var sb strings.Builder - lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 - upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3 + upperInclusive := s.Boundaries == 0 || s.Boundaries == 3 if lowerInclusive { sb.WriteRune('[') } else { sb.WriteRune('(') } - fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper) if upperInclusive { sb.WriteRune(']') } else { sb.WriteRune(')') } - fmt.Fprintf(&sb, ":%v", b.Count) + fmt.Fprintf(&sb, ":%v", s.Count) return sb.String() } diff --git a/hack/tools/vendor/github.com/prometheus/common/model/value_type.go b/hack/tools/vendor/github.com/prometheus/common/model/value_type.go index 726c50ee63..078910f46b 100644 --- a/hack/tools/vendor/github.com/prometheus/common/model/value_type.go +++ b/hack/tools/vendor/github.com/prometheus/common/model/value_type.go @@ -66,8 +66,8 @@ func (et *ValueType) UnmarshalJSON(b []byte) error 
{
 	return nil
 }
 
-func (e ValueType) String() string {
-	switch e {
+func (et ValueType) String() string {
+	switch et {
 	case ValNone:
 		return ""
 	case ValScalar:
diff --git a/hack/tools/vendor/github.com/prometheus/procfs/.golangci.yml b/hack/tools/vendor/github.com/prometheus/procfs/.golangci.yml
index 126df9e67a..3c3bf910fd 100644
--- a/hack/tools/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/hack/tools/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -1,22 +1,45 @@
----
+version: "2"
 linters:
   enable:
-  - errcheck
-  - godot
-  - gosimple
-  - govet
-  - ineffassign
-  - misspell
-  - revive
-  - staticcheck
-  - testifylint
-  - unused
-
-linter-settings:
-  godot:
-    capital: true
-    exclude:
-    # Ignore "See: URL"
-    - 'See:'
-  misspell:
-    locale: US
+    - forbidigo
+    - godot
+    - misspell
+    - revive
+    - testifylint
+  settings:
+    forbidigo:
+      forbid:
+        - pattern: ^fmt\.Print.*$
+          msg: Do not commit print statements.
+    godot:
+      exclude:
+        # Ignore "See: URL".
+        - 'See:'
+      capital: true
+    misspell:
+      locale: US
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  settings:
+    goimports:
+      local-prefixes:
+        - github.com/prometheus/procfs
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
diff --git a/hack/tools/vendor/github.com/prometheus/procfs/Makefile.common b/hack/tools/vendor/github.com/prometheus/procfs/Makefile.common
index 1617292350..0ed55c2ba2 100644
--- a/hack/tools/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/hack/tools/vendor/github.com/prometheus/procfs/Makefile.common
@@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
 GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
 
 GO_VERSION ?= $(shell $(GO) version)
-GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
 PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
 
 PROMU := $(FIRST_GOPATH)/bin/promu
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.0
+GOLANGCI_LINT_VERSION ?= v2.0.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -275,3 +275,9 @@ $(1)_precheck:
 		exit 1; \
 	fi
 endef
+
+govulncheck: install-govulncheck
+	govulncheck ./...
+
+install-govulncheck:
+	command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/hack/tools/vendor/github.com/prometheus/procfs/README.md b/hack/tools/vendor/github.com/prometheus/procfs/README.md
index 1224816c2a..0718239cf1 100644
--- a/hack/tools/vendor/github.com/prometheus/procfs/README.md
+++ b/hack/tools/vendor/github.com/prometheus/procfs/README.md
@@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`.
 The procfs library includes a set of test fixtures which include many example files from
 the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
 which is extracted automatically during testing.
To add/update the test fixtures, first -ensure the `fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make fixtures/.unpacked` or just `make test`. +ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`. ```bash rm -rf testdata/fixtures make test ``` -Next, make the required changes to the extracted files in the `fixtures` directory. When +Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using `git diff testdata/fixtures.ttar`. diff --git a/hack/tools/vendor/github.com/prometheus/procfs/arp.go b/hack/tools/vendor/github.com/prometheus/procfs/arp.go index cdcc8a7ccc..2e53344151 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/arp.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/arp.go @@ -23,9 +23,9 @@ import ( // Learned from include/uapi/linux/if_arp.h. const ( - // completed entry (ha valid). + // Completed entry (ha valid). ATFComplete = 0x02 - // permanent entry. + // Permanent entry. ATFPermanent = 0x04 // Publish entry. ATFPublish = 0x08 diff --git a/hack/tools/vendor/github.com/prometheus/procfs/fs.go b/hack/tools/vendor/github.com/prometheus/procfs/fs.go index 4980c875bf..9bdaccc7c8 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/fs.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/fs.go @@ -24,8 +24,14 @@ type FS struct { isReal bool } -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = fs.DefaultProcMountPoint +const ( + // DefaultMountPoint is the common mount point of the proc filesystem. + DefaultMountPoint = fs.DefaultProcMountPoint + + // SectorSize represents the size of a sector in bytes. + // It is specific to Linux block I/O operations. + SectorSize = 512 +) // NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. // It will error if the mount point directory can't be read or is a file. diff --git a/hack/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/hack/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 134767d69a..1b5bdbdf84 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -17,7 +17,7 @@ package procfs // isRealProc returns true on architectures that don't have a Type argument -// in their Statfs_t struct -func isRealProc(mountPoint string) (bool, error) { +// in their Statfs_t struct. 
+func isRealProc(_ string) (bool, error) { return true, nil } diff --git a/hack/tools/vendor/github.com/prometheus/procfs/fscache.go b/hack/tools/vendor/github.com/prometheus/procfs/fscache.go index cf2e3eaa03..7db8633077 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/fscache.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/fscache.go @@ -162,7 +162,7 @@ type Fscacheinfo struct { ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 // Number of release reqs ignored due to in-progress store ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores cancelled due to release req + // Number of page stores canceled due to release req PageStoresCancelledByReleaseRequests uint64 VmscanWaiting uint64 // Number of times async ops added to pending queues @@ -171,11 +171,11 @@ type Fscacheinfo struct { OpsRunning uint64 // Number of times async ops queued for processing OpsEnqueued uint64 - // Number of async ops cancelled + // Number of async ops canceled OpsCancelled uint64 // Number of async ops rejected due to object lookup/create failure OpsRejected uint64 - // Number of async ops initialised + // Number of async ops initialized OpsInitialised uint64 // Number of async ops queued for deferred release OpsDeferred uint64 diff --git a/hack/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/hack/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 3c18c7610e..3a43e83915 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -28,6 +28,9 @@ const ( // DefaultConfigfsMountPoint is the common mount point of the configfs. DefaultConfigfsMountPoint = "/sys/kernel/config" + + // DefaultSelinuxMountPoint is the common mount point of the selinuxfs. + DefaultSelinuxMountPoint = "/sys/fs/selinux" ) // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an diff --git a/hack/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go b/hack/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go index 14272dc788..5a7d2df06a 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -14,6 +14,7 @@ package util import ( + "errors" "os" "strconv" "strings" @@ -110,3 +111,16 @@ func ParseBool(b string) *bool { } return &truth } + +// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX. +func ReadHexFromFile(path string) (uint64, error) { + data, err := os.ReadFile(path) + if err != nil { + return 0, err + } + hexString := strings.TrimSpace(string(data)) + if !strings.HasPrefix(hexString, "0x") { + return 0, errors.New("invalid format: hex string does not start with '0x'") + } + return strconv.ParseUint(hexString[2:], 16, 64) +} diff --git a/hack/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/hack/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index 1ab875ceec..d5404a6d72 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -20,6 +20,8 @@ package util import ( "bytes" "os" + "strconv" + "strings" "syscall" ) @@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) { return string(bytes.TrimSpace(b[:n])), nil } + +// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it. 
+func SysReadUintFromFile(path string) (uint64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it. +func SysReadIntFromFile(path string) (int64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/hack/tools/vendor/github.com/prometheus/procfs/mountstats.go b/hack/tools/vendor/github.com/prometheus/procfs/mountstats.go index 75a3b6c810..50caa73274 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/mountstats.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/mountstats.go @@ -45,11 +45,11 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 - // kernel version >= 4.14 MaxLen + // Kernel version >= 4.14 MaxLen // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 fieldTransport11RDMAMaxLen = 28 - // kernel version <= 4.2 MinLen + // Kernel version <= 4.2 MinLen // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 fieldTransport11RDMAMinLen = 20 ) @@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats switch statVersion { case statVersion10: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport10UDPLen - } else { + default: return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { @@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } case statVersion11: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport11UDPLen - } else if protocol == "rdma" { + case "rdma": expectedLength = fieldTransport11RDMAMinLen - } else { + default: return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || @@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // For the udp RPC transport there is no connection count, connect idle time, // or idle time (fields #3, #4, and #5); all other fields are the same. So // we set them to 0 here. - if protocol == "udp" { + switch protocol { + case "udp": ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } else if protocol == "tcp" { + case "tcp": ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) - } else if protocol == "rdma" { + case "rdma": ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) 
} diff --git a/hack/tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/hack/tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go new file mode 100644 index 0000000000..f50b38e352 --- /dev/null +++ b/hack/tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" +) + +// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc//net/dev_snmp6/. +// The outer map's keys are interface names and the inner map's keys are stat names. +// +// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type. +type NetDevSNMP6 map[string]map[string]uint64 + +// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/ +// directory. +func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6")) +} + +// Returns kernel/system statistics read from interface files within the /proc//net/dev_snmp6/ +// directory. +func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(p.path("net/dev_snmp6")) +} + +// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory. +func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { + netDevSNMP6 := make(NetDevSNMP6) + + // The net/dev_snmp6 folders contain one file per interface + ifaceFiles, err := os.ReadDir(dir) + if err != nil { + // On systems with IPv6 disabled, this directory won't exist. + // Do nothing. 
+ if errors.Is(err, os.ErrNotExist) { + return netDevSNMP6, err + } + return netDevSNMP6, err + } + + for _, iFaceFile := range ifaceFiles { + f, err := os.Open(dir + "/" + iFaceFile.Name()) + if err != nil { + return netDevSNMP6, err + } + defer f.Close() + + netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f) + if err != nil { + return netDevSNMP6, err + } + } + + return netDevSNMP6, nil +} + +func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) { + m := make(map[string]uint64) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + key, val := stat[0], stat[1] + + // Expect stat name to contain "6" or be "ifIndex" + if strings.Contains(key, "6") || key == "ifIndex" { + v, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return m, err + } + + m[key] = v + } + } + return m, scanner.Err() +} diff --git a/hack/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go b/hack/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go index b70f1fc7a4..19e3378f72 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -25,7 +25,7 @@ import ( ) const ( - // readLimit is used by io.LimitReader while reading the content of the + // Maximum size limit used by io.LimitReader while reading the content of the // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic // as each line represents a single used socket. // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. @@ -50,12 +50,12 @@ type ( // UsedSockets shows the total number of parsed lines representing the // number of used sockets. UsedSockets uint64 - // Drops shows the total number of dropped packets of all UPD sockets. + // Drops shows the total number of dropped packets of all UDP sockets. Drops *uint64 } - // netIPSocketLine represents the fields parsed from a single line - // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // A single line parser for fields from /proc/net/{t,u}dp{,6}. + // Fields which are not used by IPSocket are skipped. // Drops is non-nil for udp{,6}, but nil for tcp{,6}. // For the proc file format details, see https://linux.die.net/man/5/proc. 
netIPSocketLine struct { diff --git a/hack/tools/vendor/github.com/prometheus/procfs/net_protocols.go b/hack/tools/vendor/github.com/prometheus/procfs/net_protocols.go index b6c77b709f..8d4b1ac05b 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/net_protocols.go @@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro if err != nil { return nil, err } - if fields[4] == enabled { + switch fields[4] { + case enabled: line.Pressure = 1 - } else if fields[4] == disabled { + case disabled: line.Pressure = 0 - } else { + default: line.Pressure = -1 } line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) if err != nil { return nil, err } - if fields[6] == enabled { + switch fields[6] { + case enabled: line.Slab = true - } else if fields[6] == disabled { + case disabled: line.Slab = false - } else { + default: return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } for i := 0; i < len(capabilities); i++ { - if capabilities[i] == "y" { + switch capabilities[i] { + case "y": *capabilityFields[i] = true - } else if capabilities[i] == "n" { + case "n": *capabilityFields[i] = false - } else { + default: return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } diff --git a/hack/tools/vendor/github.com/prometheus/procfs/net_tcp.go b/hack/tools/vendor/github.com/prometheus/procfs/net_tcp.go index 5277629557..0396d72015 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/net_tcp.go @@ -25,24 +25,28 @@ type ( // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCP() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp")) } // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp6")) } // NetTCPSummary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp")) } // NetTCP6Summary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. 
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp6")) } diff --git a/hack/tools/vendor/github.com/prometheus/procfs/net_unix.go b/hack/tools/vendor/github.com/prometheus/procfs/net_unix.go index d868cebdaa..d7e0cacb4c 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/net_unix.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/net_unix.go @@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { return &nu, nil } -func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { +func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) { fields := strings.Fields(line) l := len(fields) - if l < min { - return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) + if l < minFields { + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l) } // Field offsets are as follows: @@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, } // Path field is optional. - if l > min { + if l > minFields { // Path occurs at either index 6 or 7 depending on whether inode is // already present. pathIdx := 7 diff --git a/hack/tools/vendor/github.com/prometheus/procfs/proc.go b/hack/tools/vendor/github.com/prometheus/procfs/proc.go index 142796368f..368187fa88 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/proc.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/proc.go @@ -37,9 +37,9 @@ type Proc struct { type Procs []Proc var ( - ErrFileParse = errors.New("Error Parsing File") - ErrFileRead = errors.New("Error Reading File") - ErrMountPoint = errors.New("Error Accessing Mount point") + ErrFileParse = errors.New("error parsing file") + ErrFileRead = errors.New("error reading file") + ErrMountPoint = errors.New("error accessing mount point") ) func (p Procs) Len() int { return len(p) } @@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) { if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), "")) if err != nil { return Proc{}, err } diff --git a/hack/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go b/hack/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go index daeed7f571..4a64347c03 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -24,7 +24,7 @@ import ( ) // Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 API has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in // this hierarchy' (where==what path on the specific cgroupfs).
By prefixing this path with the mount point of diff --git a/hack/tools/vendor/github.com/prometheus/procfs/proc_io.go b/hack/tools/vendor/github.com/prometheus/procfs/proc_io.go index 776f349717..d15b66ddb6 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/proc_io.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/proc_io.go @@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) { ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" + "cancelled_write_bytes: %d\n" //nolint:misspell _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) diff --git a/hack/tools/vendor/github.com/prometheus/procfs/proc_netstat.go b/hack/tools/vendor/github.com/prometheus/procfs/proc_netstat.go index 8e3ff4d794..4248c1716e 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = &value + procNetstat.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = &value + procNetstat.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = &value + procNetstat.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = &value + procNetstat.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = &value + procNetstat.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = &value + procNetstat.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = &value + procNetstat.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = &value + procNetstat.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = &value + procNetstat.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = &value + procNetstat.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = &value + procNetstat.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = &value + procNetstat.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = &value + procNetstat.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = &value + procNetstat.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = &value + procNetstat.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = &value + procNetstat.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = &value + procNetstat.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = &value + procNetstat.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = &value + procNetstat.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = &value + procNetstat.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = &value + procNetstat.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = &value + procNetstat.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = &value + procNetstat.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = &value + procNetstat.TCPRenoRecovery = &value case 
"TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = &value + procNetstat.TCPSackRecovery = &value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = &value + procNetstat.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = &value + procNetstat.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = &value + procNetstat.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = &value + procNetstat.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = &value + procNetstat.TCPFullUndo = &value case "TCPPartialUndo": - procNetstat.TcpExt.TCPPartialUndo = &value + procNetstat.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = &value + procNetstat.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = &value + procNetstat.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = &value + procNetstat.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = &value + procNetstat.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = &value + procNetstat.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = &value + procNetstat.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = &value + procNetstat.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = &value + procNetstat.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = &value + procNetstat.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = &value + procNetstat.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = &value + procNetstat.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = &value + procNetstat.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = &value + procNetstat.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = &value + procNetstat.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = &value + procNetstat.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = &value + procNetstat.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = &value + procNetstat.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = &value + procNetstat.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = &value + procNetstat.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = &value + procNetstat.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = &value + procNetstat.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = &value + procNetstat.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = &value + procNetstat.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = &value + procNetstat.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = &value + procNetstat.TCPReqQFullDrop = &value case "TCPRetransFail": - 
procNetstat.TcpExt.TCPRetransFail = &value + procNetstat.TCPRetransFail = &value case "TCPRcvCoalesce": - procNetstat.TcpExt.TCPRcvCoalesce = &value + procNetstat.TCPRcvCoalesce = &value case "TCPRcvQDrop": - procNetstat.TcpExt.TCPRcvQDrop = &value + procNetstat.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = &value + procNetstat.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = &value + procNetstat.TCPOFODrop = &value case "TCPOFOMerge": - procNetstat.TcpExt.TCPOFOMerge = &value + procNetstat.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = &value + procNetstat.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = &value + procNetstat.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = &value + procNetstat.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = &value + procNetstat.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = &value + procNetstat.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = &value + procNetstat.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = &value + procNetstat.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = &value + procNetstat.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = &value + procNetstat.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value + procNetstat.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = &value + procNetstat.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = &value + procNetstat.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = &value + procNetstat.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = &value + procNetstat.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = &value + procNetstat.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = &value + procNetstat.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = &value + procNetstat.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = &value + procNetstat.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = &value + procNetstat.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = &value + procNetstat.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = &value + procNetstat.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = &value + procNetstat.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = &value + procNetstat.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = &value + procNetstat.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value + 
procNetstat.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = &value + procNetstat.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = &value + procNetstat.TCPACKSkippedChallenge = &value case "TCPWinProbe": - procNetstat.TcpExt.TCPWinProbe = &value + procNetstat.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = &value + procNetstat.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = &value + procNetstat.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = &value + procNetstat.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = &value + procNetstat.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = &value + procNetstat.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = &value + procNetstat.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = &value + procNetstat.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = &value + procNetstat.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = &value + procNetstat.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = &value + procNetstat.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = &value + procNetstat.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = &value + procNetstat.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = &value + procNetstat.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = &value + procNetstat.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = &value + procNetstat.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = &value + procNetstat.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = &value + procNetstat.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = &value + procNetstat.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = &value + procNetstat.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = &value + procNetstat.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = &value + procNetstat.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = &value + procNetstat.ReasmOverlaps = &value } } } diff --git a/hack/tools/vendor/github.com/prometheus/procfs/proc_smaps.go b/hack/tools/vendor/github.com/prometheus/procfs/proc_smaps.go index 09060e8208..9a297afcf8 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -19,7 +19,6 @@ package procfs import ( "bufio" "errors" - "fmt" "os" "regexp" "strconv" @@ -29,7 +28,7 @@ import ( ) var ( - // match the header line before each mapped zone in `/proc/pid/smaps`. + // Match the header line before each mapped zone in `/proc/pid/smaps`. 
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) @@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { func (s *ProcSMapsRollup) parseLine(line string) error { kv := strings.SplitN(line, ":", 2) if len(kv) != 2 { - fmt.Println(line) return errors.New("invalid net/dev line, missing colon") } diff --git a/hack/tools/vendor/github.com/prometheus/procfs/proc_snmp.go b/hack/tools/vendor/github.com/prometheus/procfs/proc_snmp.go index b9d2cf642a..4bdc90b07e 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = &value + procSnmp.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = &value + procSnmp.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = &value + procSnmp.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = &value + procSnmp.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = &value + procSnmp.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = &value + procSnmp.ForwDatagrams = &value case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = &value + procSnmp.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = &value + procSnmp.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = &value + procSnmp.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = &value + procSnmp.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = &value + procSnmp.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = &value + procSnmp.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = &value + procSnmp.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = &value + procSnmp.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = &value + procSnmp.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = &value + procSnmp.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = &value + procSnmp.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = &value + procSnmp.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = &value + procSnmp.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = &value + procSnmp.InMsgs = &value case "InErrors": procSnmp.Icmp.InErrors = &value case "InCsumErrors": procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = &value + procSnmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = &value + procSnmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = &value + procSnmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = &value + procSnmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = &value + procSnmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = &value + procSnmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = &value + procSnmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = &value + procSnmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = &value + procSnmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = &value + procSnmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = &value + 
procSnmp.InAddrMaskReps = &value case "OutMsgs": - procSnmp.Icmp.OutMsgs = &value + procSnmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = &value + procSnmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = &value + procSnmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = &value + procSnmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = &value + procSnmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = &value + procSnmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = &value + procSnmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = &value + procSnmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = &value + procSnmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = &value + procSnmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = &value + procSnmp.OutTimestampReps = &value case "OutAddrMasks": - procSnmp.Icmp.OutAddrMasks = &value + procSnmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = &value + procSnmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = &value + procSnmp.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = &value + procSnmp.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = &value + procSnmp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = &value + procSnmp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = &value + procSnmp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = &value + procSnmp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = &value + procSnmp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = &value + procSnmp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = &value + procSnmp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = &value + procSnmp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = &value + procSnmp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = &value + procSnmp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = &value + procSnmp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = &value + procSnmp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = &value + procSnmp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = &value + procSnmp.OutRsts = &value case "InCsumErrors": procSnmp.Tcp.InCsumErrors = &value } diff --git a/hack/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go b/hack/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go index 3059cc6a13..fb7fd3995b 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = &value + procSnmp6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = &value + procSnmp6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = &value + procSnmp6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = &value + procSnmp6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = &value + procSnmp6.InAddrErrors = &value case "InUnknownProtos": - 
procSnmp6.Ip6.InUnknownProtos = &value + procSnmp6.InUnknownProtos = &value case "InTruncatedPkts": - procSnmp6.Ip6.InTruncatedPkts = &value + procSnmp6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = &value + procSnmp6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = &value + procSnmp6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = &value + procSnmp6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = &value + procSnmp6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = &value + procSnmp6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = &value + procSnmp6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = &value + procSnmp6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = &value + procSnmp6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = &value + procSnmp6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = &value + procSnmp6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = &value + procSnmp6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = &value + procSnmp6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = &value + procSnmp6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = &value + procSnmp6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = &value + procSnmp6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = &value + procSnmp6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = &value + procSnmp6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = &value + procSnmp6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = &value + procSnmp6.OutMcastOctets = &value case "InBcastOctets": - procSnmp6.Ip6.InBcastOctets = &value + procSnmp6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = &value + procSnmp6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = &value + procSnmp6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = &value + procSnmp6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = &value + procSnmp6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = &value + procSnmp6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = &value + procSnmp6.InMsgs = &value case "InErrors": procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = &value + procSnmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = &value + procSnmp6.OutErrors = &value case "InCsumErrors": procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = &value + procSnmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = &value + procSnmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = &value + procSnmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = &value + procSnmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = &value + procSnmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = &value + procSnmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = &value + procSnmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = &value + 
procSnmp6.InGroupMembResponses = &value case "InGroupMembReductions": - procSnmp6.Icmp6.InGroupMembReductions = &value + procSnmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = &value + procSnmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = &value + procSnmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = &value + procSnmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = &value + procSnmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = &value + procSnmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = &value + procSnmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = &value + procSnmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = &value + procSnmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = &value + procSnmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = &value + procSnmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = &value + procSnmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = &value + procSnmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = &value + procSnmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = &value + procSnmp6.OutGroupMembResponses = &value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = &value + procSnmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = &value + procSnmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = &value + procSnmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = &value + procSnmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = &value + procSnmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = &value + procSnmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = &value + procSnmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = &value + procSnmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = &value + procSnmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = &value + procSnmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = &value + procSnmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = &value + procSnmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = &value + procSnmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = &value + procSnmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = &value + procSnmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = &value + procSnmp6.OutType143 = &value } case "Udp6": switch key { @@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "InCsumErrors": procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = &value + procSnmp6.IgnoredMulti = &value } case "UdpLite6": switch key { diff --git 
a/hack/tools/vendor/github.com/prometheus/procfs/proc_status.go b/hack/tools/vendor/github.com/prometheus/procfs/proc_status.go index a055197c63..dd8aa56885 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/proc_status.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/proc_status.go @@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt } } case "NSpid": - s.NSpids = calcNSPidsList(vString) + nspids, err := calcNSPidsList(vString) + if err != nil { + return err + } + s.NSpids = nspids case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 { return g } -func calcNSPidsList(nspidsString string) []uint64 { - s := strings.Split(nspidsString, " ") +func calcNSPidsList(nspidsString string) ([]uint64, error) { + s := strings.Split(nspidsString, "\t") var nspids []uint64 for _, nspid := range s { - nspid, _ := strconv.ParseUint(nspid, 10, 64) - if nspid == 0 { - continue + nspid, err := strconv.ParseUint(nspid, 10, 64) + if err != nil { + return nil, err } nspids = append(nspids, nspid) } - return nspids + return nspids, nil } diff --git a/hack/tools/vendor/github.com/prometheus/procfs/proc_sys.go b/hack/tools/vendor/github.com/prometheus/procfs/proc_sys.go index 5eefbe2ef8..3810d1ac99 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/proc_sys.go @@ -21,7 +21,7 @@ import ( ) func sysctlToPath(sysctl string) string { - return strings.Replace(sysctl, ".", "/", -1) + return strings.ReplaceAll(sysctl, ".", "/") } func (fs FS) SysctlStrings(sysctl string) ([]string, error) { diff --git a/hack/tools/vendor/github.com/prometheus/procfs/softirqs.go b/hack/tools/vendor/github.com/prometheus/procfs/softirqs.go index 28708e0745..403e6ae708 100644 --- a/hack/tools/vendor/github.com/prometheus/procfs/softirqs.go +++ b/hack/tools/vendor/github.com/prometheus/procfs/softirqs.go @@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { if len(parts) < 2 { continue } - switch { - case parts[0] == "HI:": + switch parts[0] { + case "HI:": perCPU := parts[1:] softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TIMER:": + case "TIMER:": perCPU := parts[1:] softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_TX:": + case "NET_TX:": perCPU := parts[1:] softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_RX:": + case "NET_RX:": perCPU := parts[1:] softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "BLOCK:": + case "BLOCK:": perCPU := parts[1:] softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -109,7 +109,7 @@ func 
parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "IRQ_POLL:": + case "IRQ_POLL:": perCPU := parts[1:] softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TASKLET:": + case "TASKLET:": perCPU := parts[1:] softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "SCHED:": + case "SCHED:": perCPU := parts[1:] softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "HRTIMER:": + case "HRTIMER:": perCPU := parts[1:] softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "RCU:": + case "RCU:": perCPU := parts[1:] softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { diff --git a/hack/tools/vendor/github.com/spf13/pflag/README.md b/hack/tools/vendor/github.com/spf13/pflag/README.md index 7eacc5bdbe..388c4e5ead 100644 --- a/hack/tools/vendor/github.com/spf13/pflag/README.md +++ b/hack/tools/vendor/github.com/spf13/pflag/README.md @@ -284,6 +284,33 @@ func main() { } ``` +### Using pflag with go test +`pflag` does not parse the shorthand versions of go test's built-in flags (i.e., those starting with `-test.`). +For more context, see issues [#63](https://github.com/spf13/pflag/issues/63) and [#238](https://github.com/spf13/pflag/issues/238). + +For example, if you use pflag in your `TestMain` function and call `pflag.Parse()` after defining your custom flags, running a test like this: +```bash +go test /your/tests -run ^YourTest -v --your-test-pflags +``` +will result in the `-v` flag being ignored. This happens because of the way pflag handles flag parsing, skipping over go test's built-in shorthand flags. +To work around this, you can use the `ParseSkippedFlags` function, which ensures that go test's flags are parsed separately using the standard flag package.
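Since the scenario above is framed around `TestMain`, a test-oriented sketch of the same workaround may help in addition to the `main`-function example that follows. This sketch is illustrative and not part of the vendored README: the package name and the custom flag name are placeholders, and the only non-standard API it assumes is the `ParseSkippedFlags` function added to golangflag.go further down in this diff.

```go
package example_test

import (
	goflag "flag"
	"os"
	"testing"

	flag "github.com/spf13/pflag"
)

// Custom pflag read by the tests; the flag name is a placeholder.
var yourTestPflag = flag.String("your-test-pflag", "", "custom pflag used by the tests")

func TestMain(m *testing.M) {
	// Make the standard library's flags (including go test's long-form
	// -test.* flags) visible to pflag, as in the example below.
	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)

	// Parse the -test.* arguments with the standard flag package first,
	// so that options produced by `go test -v` (passed to the test binary
	// as -test.v=true) are not skipped by pflag.Parse().
	if err := flag.ParseSkippedFlags(os.Args[1:], goflag.CommandLine); err != nil {
		os.Exit(2)
	}

	flag.Parse()
	os.Exit(m.Run())
}
```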
+ +**Example**: You want to parse go test flags that are otherwise ignored by `pflag.Parse()` +```go +import ( + goflag "flag" + "os" + + flag "github.com/spf13/pflag" +) + +var ip *int = flag.Int("flagname", 1234, "help message for flagname") + +func main() { + flag.CommandLine.AddGoFlagSet(goflag.CommandLine) + flag.ParseSkippedFlags(os.Args[1:], goflag.CommandLine) + flag.Parse() +} +``` + ## More info You can see the full reference documentation of the pflag package diff --git a/hack/tools/vendor/github.com/spf13/pflag/bool_func.go b/hack/tools/vendor/github.com/spf13/pflag/bool_func.go new file mode 100644 index 0000000000..83d77afa89 --- /dev/null +++ b/hack/tools/vendor/github.com/spf13/pflag/bool_func.go @@ -0,0 +1,40 @@ +package pflag + +// -- func Value +type boolfuncValue func(string) error + +func (f boolfuncValue) Set(s string) error { return f(s) } + +func (f boolfuncValue) Type() string { return "boolfunc" } + +func (f boolfuncValue) String() string { return "" } // same behavior as stdlib 'flag' package + +func (f boolfuncValue) IsBoolFlag() bool { return true } + +// BoolFunc defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed +// on the command line. +func (f *FlagSet) BoolFunc(name string, usage string, fn func(string) error) { + f.BoolFuncP(name, "", usage, fn) +} + +// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolFuncP(name, shorthand string, usage string, fn func(string) error) { + var val Value = boolfuncValue(fn) + flag := f.VarPF(val, name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// BoolFunc defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed +// on the command line. +func BoolFunc(name string, usage string, fn func(string) error) { + CommandLine.BoolFuncP(name, "", usage, fn) +} + +// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash. +func BoolFuncP(name, shorthand string, usage string, fn func(string) error) { + CommandLine.BoolFuncP(name, shorthand, usage, fn) +} diff --git a/hack/tools/vendor/github.com/spf13/pflag/count.go b/hack/tools/vendor/github.com/spf13/pflag/count.go index a0b2679f71..d49c0143c1 100644 --- a/hack/tools/vendor/github.com/spf13/pflag/count.go +++ b/hack/tools/vendor/github.com/spf13/pflag/count.go @@ -85,7 +85,7 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int { // Count defines a count flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func Count(name string, usage string) *int { return CommandLine.CountP(name, "", usage) } diff --git a/hack/tools/vendor/github.com/spf13/pflag/errors.go b/hack/tools/vendor/github.com/spf13/pflag/errors.go new file mode 100644 index 0000000000..ff11b66bef --- /dev/null +++ b/hack/tools/vendor/github.com/spf13/pflag/errors.go @@ -0,0 +1,149 @@ +package pflag + +import "fmt" + +// notExistErrorMessageType specifies which flavor of "flag does not exist"
This allows the related errors to be grouped +// under a single NotExistError struct without making a breaking change to +// the error message text. +type notExistErrorMessageType int + +const ( + flagNotExistMessage notExistErrorMessageType = iota + flagNotDefinedMessage + flagNoSuchFlagMessage + flagUnknownFlagMessage + flagUnknownShorthandFlagMessage +) + +// NotExistError is the error returned when trying to access a flag that +// does not exist in the FlagSet. +type NotExistError struct { + name string + specifiedShorthands string + messageType notExistErrorMessageType +} + +// Error implements error. +func (e *NotExistError) Error() string { + switch e.messageType { + case flagNotExistMessage: + return fmt.Sprintf("flag %q does not exist", e.name) + + case flagNotDefinedMessage: + return fmt.Sprintf("flag accessed but not defined: %s", e.name) + + case flagNoSuchFlagMessage: + return fmt.Sprintf("no such flag -%v", e.name) + + case flagUnknownFlagMessage: + return fmt.Sprintf("unknown flag: --%s", e.name) + + case flagUnknownShorthandFlagMessage: + c := rune(e.name[0]) + return fmt.Sprintf("unknown shorthand flag: %q in -%s", c, e.specifiedShorthands) + } + + panic(fmt.Errorf("unknown flagNotExistErrorMessageType: %v", e.messageType)) +} + +// GetSpecifiedName returns the name of the flag (without dashes) as it +// appeared in the parsed arguments. +func (e *NotExistError) GetSpecifiedName() string { + return e.name +} + +// GetSpecifiedShortnames returns the group of shorthand arguments +// (without dashes) that the flag appeared within. If the flag was not in a +// shorthand group, this will return an empty string. +func (e *NotExistError) GetSpecifiedShortnames() string { + return e.specifiedShorthands +} + +// ValueRequiredError is the error returned when a flag needs an argument but +// no argument was provided. +type ValueRequiredError struct { + flag *Flag + specifiedName string + specifiedShorthands string +} + +// Error implements error. +func (e *ValueRequiredError) Error() string { + if len(e.specifiedShorthands) > 0 { + c := rune(e.specifiedName[0]) + return fmt.Sprintf("flag needs an argument: %q in -%s", c, e.specifiedShorthands) + } + + return fmt.Sprintf("flag needs an argument: --%s", e.specifiedName) +} + +// GetFlag returns the flag for which the error occurred. +func (e *ValueRequiredError) GetFlag() *Flag { + return e.flag +} + +// GetSpecifiedName returns the name of the flag (without dashes) as it +// appeared in the parsed arguments. +func (e *ValueRequiredError) GetSpecifiedName() string { + return e.specifiedName +} + +// GetSpecifiedShortnames returns the group of shorthand arguments +// (without dashes) that the flag appeared within. If the flag was not in a +// shorthand group, this will return an empty string. +func (e *ValueRequiredError) GetSpecifiedShortnames() string { + return e.specifiedShorthands +} + +// InvalidValueError is the error returned when an invalid value is used +// for a flag. +type InvalidValueError struct { + flag *Flag + value string + cause error +} + +// Error implements error. +func (e *InvalidValueError) Error() string { + flag := e.flag + var flagName string + if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { + flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) + } else { + flagName = fmt.Sprintf("--%s", flag.Name) + } + return fmt.Sprintf("invalid argument %q for %q flag: %v", e.value, flagName, e.cause) +} + +// Unwrap implements errors.Unwrap. 
+func (e *InvalidValueError) Unwrap() error { + return e.cause +} + +// GetFlag returns the flag for which the error occurred. +func (e *InvalidValueError) GetFlag() *Flag { + return e.flag +} + +// GetValue returns the invalid value that was provided. +func (e *InvalidValueError) GetValue() string { + return e.value +} + +// InvalidSyntaxError is the error returned when a bad flag name is passed on +// the command line. +type InvalidSyntaxError struct { + specifiedFlag string +} + +// Error implements error. +func (e *InvalidSyntaxError) Error() string { + return fmt.Sprintf("bad flag syntax: %s", e.specifiedFlag) +} + +// GetSpecifiedFlag returns the exact flag (with dashes) as it +// appeared in the parsed arguments. +func (e *InvalidSyntaxError) GetSpecifiedFlag() string { + return e.specifiedFlag +} diff --git a/hack/tools/vendor/github.com/spf13/pflag/flag.go b/hack/tools/vendor/github.com/spf13/pflag/flag.go index 7c058de374..2fd3c57597 100644 --- a/hack/tools/vendor/github.com/spf13/pflag/flag.go +++ b/hack/tools/vendor/github.com/spf13/pflag/flag.go @@ -27,23 +27,32 @@ unaffected. Define flags using flag.String(), Bool(), Int(), etc. This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + var ip = flag.Int("flagname", 1234, "help message for flagname") + If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int func init() { flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") } + Or you can create custom flags that satisfy the Value interface (with pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") + For such flags, the default value is just the initial value of the variable. After all flags are defined, call + flag.Parse() + to parse the command line into the defined flags. Flags may then be used directly. If you're using the flags themselves, they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) @@ -54,22 +63,26 @@ The arguments are indexed from 0 through flag.NArg()-1. The pflag package also defines some new functions that are not in flag, that give one-letter shorthands for flags. You can use these by appending 'P' to the name of any function that defines a flag. + var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } flag.VarP(&flagval, "varname", "v", "help message") + Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags. Command line flag syntax: + --flag // boolean flags only --flag=x Unlike the flag package, a single dash before an option means something different than a double dash. Single dashes signify a series of shorthand letters for flags. All but the last shorthand letter must be boolean flags. + // boolean flags -f -abc @@ -124,12 +137,17 @@ const ( PanicOnError ) -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { +// ParseErrorsAllowlist defines the parsing errors that can be ignored +type ParseErrorsAllowlist struct { // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags UnknownFlags bool } +// ParseErrorsWhitelist defines the parsing errors that can be ignored. +// +// Deprecated: use [ParseErrorsAllowlist] instead. This type will be removed in a future release.
+type ParseErrorsWhitelist = ParseErrorsAllowlist + // NormalizedName is a flag name that has been normalized according to rules // for the FlagSet (e.g. making '-' and '_' equivalent). type NormalizedName string @@ -145,8 +163,13 @@ type FlagSet struct { // help/usage messages. SortFlags bool - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist + // ParseErrorsAllowlist is used to configure an allowlist of errors + ParseErrorsAllowlist ParseErrorsAllowlist + + // ParseErrorsAllowlist is used to configure an allowlist of errors. + // + // Deprecated: use [FlagSet.ParseErrorsAllowlist] instead. This field will be removed in a future release. + ParseErrorsWhitelist ParseErrorsAllowlist name string parsed bool @@ -381,7 +404,7 @@ func (f *FlagSet) lookup(name NormalizedName) *Flag { func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { flag := f.Lookup(name) if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s", name) + err := &NotExistError{name: name, messageType: flagNotDefinedMessage} return nil, err } @@ -411,7 +434,7 @@ func (f *FlagSet) ArgsLenAtDash() int { func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) @@ -427,7 +450,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) @@ -441,7 +464,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro func (f *FlagSet) MarkHidden(name string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } flag.Hidden = true return nil @@ -464,18 +487,16 @@ func (f *FlagSet) Set(name, value string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] if !ok { - return fmt.Errorf("no such flag -%v", name) + return &NotExistError{name: name, messageType: flagNoSuchFlagMessage} } err := flag.Value.Set(value) if err != nil { - var flagName string - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) - } else { - flagName = fmt.Sprintf("--%s", flag.Name) + return &InvalidValueError{ + flag: flag, + value: value, + cause: err, } - return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) } if !flag.Changed { @@ -501,7 +522,7 @@ func (f *FlagSet) SetAnnotation(name, key string, values []string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] if !ok { - return fmt.Errorf("no such flag -%v", name) + return &NotExistError{name: name, messageType: flagNoSuchFlagMessage} } if flag.Annotations == nil { flag.Annotations = map[string][]string{} @@ -538,7 +559,7 @@ func (f *FlagSet) PrintDefaults() { func (f *Flag) defaultIsZeroValue() bool { switch f.Value.(type) { case boolFlag: - 
return f.DefValue == "false" + return f.DefValue == "false" || f.DefValue == "" case *durationValue: // Beginning in Go 1.7, duration zero values are "0s" return f.DefValue == "0" || f.DefValue == "0s" @@ -551,7 +572,7 @@ func (f *Flag) defaultIsZeroValue() bool { case *intSliceValue, *stringSliceValue, *stringArrayValue: return f.DefValue == "[]" default: - switch f.Value.String() { + switch f.DefValue { case "false": return true case "": @@ -588,8 +609,10 @@ func UnquoteUsage(flag *Flag) (name string, usage string) { name = flag.Value.Type() switch name { - case "bool": + case "bool", "boolfunc": name = "" + case "func": + name = "value" case "float64": name = "float" case "int64": @@ -707,7 +730,7 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { switch flag.Value.Type() { case "string": line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": + case "bool", "boolfunc": if flag.NoOptDefVal != "true" { line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) } @@ -911,12 +934,10 @@ func VarP(value Value, name, shorthand, usage string) { CommandLine.VarP(value, name, shorthand, usage) } -// failf prints to standard error a formatted error and usage message and +// fail prints an error message and usage message to standard error and // returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) +func (f *FlagSet) fail(err error) error { if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -934,9 +955,9 @@ func (f *FlagSet) usage() { } } -//--unknown (args will be empty) -//--unknown --next-flag ... (args will be --next-flag ...) -//--unknown arg ... (args will be arg ...) +// --unknown (args will be empty) +// --unknown --next-flag ... (args will be --next-flag ...) +// --unknown arg ... (args will be arg ...) func stripUnknownFlagValue(args []string) []string { if len(args) == 0 { //--unknown @@ -960,7 +981,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin a = args name := s[2:] if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) + err = f.fail(&InvalidSyntaxError{specifiedFlag: s}) return } @@ -974,6 +995,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin f.usage() return a, ErrHelp case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // --unknown=unknownval arg ... 
// we do not want to lose arg in this case if len(split) >= 2 { @@ -982,7 +1005,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin return stripUnknownFlagValue(a), nil default: - err = f.failf("unknown flag: --%s", name) + err = f.fail(&NotExistError{name: name, messageType: flagUnknownFlagMessage}) return } } @@ -1000,13 +1023,16 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin a = a[1:] } else { // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) + err = f.fail(&ValueRequiredError{ + flag: flag, + specifiedName: name, + }) return } err = fn(flag, value) if err != nil { - f.failf(err.Error()) + f.fail(err) } return } @@ -1014,7 +1040,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { outArgs = args - if strings.HasPrefix(shorthands, "test.") { + if isGotestShorthandFlag(shorthands) { return } @@ -1029,6 +1055,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = ErrHelp return case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // '-f=arg arg ...' // we do not want to lose arg in this case if len(shorthands) > 2 && shorthands[1] == '=' { @@ -1039,7 +1067,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse outArgs = stripUnknownFlagValue(outArgs) return default: - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + err = f.fail(&NotExistError{ + name: string(c), + specifiedShorthands: shorthands, + messageType: flagUnknownShorthandFlagMessage, + }) return } } @@ -1062,7 +1094,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse outArgs = args[1:] } else { // '-f' (arg was required) - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + err = f.fail(&ValueRequiredError{ + flag: flag, + specifiedName: string(c), + specifiedShorthands: shorthands, + }) return } @@ -1072,7 +1108,7 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = fn(flag, value) if err != nil { - f.failf(err.Error()) + f.fail(err) } return } @@ -1135,12 +1171,12 @@ func (f *FlagSet) Parse(arguments []string) error { } f.parsed = true - if len(arguments) < 0 { + f.args = make([]string, 0, len(arguments)) + + if len(arguments) == 0 { return nil } - f.args = make([]string, 0, len(arguments)) - set := func(flag *Flag, value string) error { return f.Set(flag.Name, value) } @@ -1151,7 +1187,10 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - fmt.Println(err) + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) @@ -1177,6 +1216,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) case ContinueOnError: return err case ExitOnError: + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) diff --git a/hack/tools/vendor/github.com/spf13/pflag/func.go b/hack/tools/vendor/github.com/spf13/pflag/func.go new file mode 100644 index 0000000000..9f4d88f271 --- /dev/null +++ b/hack/tools/vendor/github.com/spf13/pflag/func.go @@ -0,0 +1,37 @@ +package pflag + +// -- func Value +type funcValue func(string) error + +func (f funcValue) Set(s string) error { 
return f(s) } + +func (f funcValue) Type() string { return "func" } + +func (f funcValue) String() string { return "" } // same behavior as stdlib 'flag' package + +// Func defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}={value}" (or equivalent) is +// parsed on the command line, with "{value}" as an argument. +func (f *FlagSet) Func(name string, usage string, fn func(string) error) { + f.FuncP(name, "", usage, fn) +} + +// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) FuncP(name string, shorthand string, usage string, fn func(string) error) { + var val Value = funcValue(fn) + f.VarP(val, name, shorthand, usage) +} + +// Func defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}={value}" (or equivalent) is +// parsed on the command line, with "{value}" as an argument. +func Func(name string, usage string, fn func(string) error) { + CommandLine.FuncP(name, "", usage, fn) +} + +// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash. +func FuncP(name, shorthand string, usage string, fn func(string) error) { + CommandLine.FuncP(name, shorthand, usage, fn) +} diff --git a/hack/tools/vendor/github.com/spf13/pflag/golangflag.go b/hack/tools/vendor/github.com/spf13/pflag/golangflag.go index d3dd72b7fe..e62eab5381 100644 --- a/hack/tools/vendor/github.com/spf13/pflag/golangflag.go +++ b/hack/tools/vendor/github.com/spf13/pflag/golangflag.go @@ -8,8 +8,18 @@ import ( goflag "flag" "reflect" "strings" + "time" ) +// go test flags prefixes +func isGotestFlag(flag string) bool { + return strings.HasPrefix(flag, "-test.") +} + +func isGotestShorthandFlag(flag string) bool { + return strings.HasPrefix(flag, "test.") +} + // flagValueWrapper implements pflag.Value around a flag.Value. The main // difference here is the addition of the Type method that returns a string // name of the type. As this is generally unknown, we approximate that with @@ -103,3 +113,49 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { } f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) } + +// CopyToGoFlagSet will add all current flags to the given Go flag set. +// Deprecation remarks get copied into the usage description. +// Whenever possible, a flag gets added for which Go flags shows +// a proper type in the help message. +func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) { + f.VisitAll(func(flag *Flag) { + usage := flag.Usage + if flag.Deprecated != "" { + usage += " (DEPRECATED: " + flag.Deprecated + ")" + } + + switch value := flag.Value.(type) { + case *stringValue: + newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage) + case *intValue: + newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage) + case *int64Value: + newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage) + case *uintValue: + newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage) + case *uint64Value: + newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage) + case *durationValue: + newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage) + case *float64Value: + newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage) + default: + newSet.Var(flag.Value, flag.Name, usage) + } + }) +} + +// ParseSkippedFlags explicitly Parses go test flags (i.e. 
the ones starting with '-test.') with goflag.Parse(), +since by default those are skipped by pflag.Parse(). +// Typical usage example: `ParseSkippedFlags(os.Args[1:], goflag.CommandLine)` +func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error { + var skippedFlags []string + for _, f := range osArgs { + if isGotestFlag(f) { + skippedFlags = append(skippedFlags, f) + } + } + return goFlagSet.Parse(skippedFlags) +} + diff --git a/hack/tools/vendor/github.com/spf13/pflag/ipnet_slice.go b/hack/tools/vendor/github.com/spf13/pflag/ipnet_slice.go index 6b541aa879..c6e89da18d 100644 --- a/hack/tools/vendor/github.com/spf13/pflag/ipnet_slice.go +++ b/hack/tools/vendor/github.com/spf13/pflag/ipnet_slice.go @@ -73,7 +73,7 @@ func (s *ipNetSliceValue) String() string { func ipNetSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry + // Empty string would cause a slice with one (empty) entry if len(val) == 0 { return []net.IPNet{}, nil } diff --git a/hack/tools/vendor/github.com/spf13/pflag/string_to_string.go b/hack/tools/vendor/github.com/spf13/pflag/string_to_string.go index 890a01afc0..1d1e3bf91a 100644 --- a/hack/tools/vendor/github.com/spf13/pflag/string_to_string.go +++ b/hack/tools/vendor/github.com/spf13/pflag/string_to_string.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/csv" "fmt" + "sort" "strings" ) @@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string { } func (s *stringToStringValue) String() string { + keys := make([]string, 0, len(*s.value)) + for k := range *s.value { + keys = append(keys, k) + } + sort.Strings(keys) + records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { + for _, k := range keys { + v := (*s.value)[k] records = append(records, k+"="+v) } diff --git a/hack/tools/vendor/github.com/spf13/pflag/text.go b/hack/tools/vendor/github.com/spf13/pflag/text.go new file mode 100644 index 0000000000..886d5a3d80 --- /dev/null +++ b/hack/tools/vendor/github.com/spf13/pflag/text.go @@ -0,0 +1,81 @@ +package pflag + +import ( + "encoding" + "fmt" + "reflect" +) + +// following is copied from go 1.23.4 flag.go +type textValue struct{ p encoding.TextUnmarshaler } + +func newTextValue(val encoding.TextMarshaler, p encoding.TextUnmarshaler) textValue { + ptrVal := reflect.ValueOf(p) + if ptrVal.Kind() != reflect.Ptr { + panic("variable value type must be a pointer") + } + defVal := reflect.ValueOf(val) + if defVal.Kind() == reflect.Ptr { + defVal = defVal.Elem() + } + if defVal.Type() != ptrVal.Type().Elem() { + panic(fmt.Sprintf("default type does not match variable type: %v != %v", defVal.Type(), ptrVal.Type().Elem())) + } + ptrVal.Elem().Set(defVal) + return textValue{p} +} + +func (v textValue) Set(s string) error { + return v.p.UnmarshalText([]byte(s)) +} + +func (v textValue) Get() interface{} { + return v.p +} + +func (v textValue) String() string { + if m, ok := v.p.(encoding.TextMarshaler); ok { + if b, err := m.MarshalText(); err == nil { + return string(b) + } + } + return "" +} + +//end of copy + +func (v textValue) Type() string { + return reflect.ValueOf(v.p).Type().Name() +} + +// GetText sets out, which must implement encoding.TextUnmarshaler, to the value of the flag with the given name +func (f *FlagSet) GetText(name string, out encoding.TextUnmarshaler) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag accessed but not defined: %s", name) + } + if flag.Value.Type() != reflect.TypeOf(out).Name() { + return
fmt.Errorf("trying to get %s value of flag of type %s", reflect.TypeOf(out).Name(), flag.Value.Type()) + } + return out.UnmarshalText([]byte(flag.Value.String())) +} + +// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p. +func (f *FlagSet) TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) { + f.VarP(newTextValue(value, p), name, "", usage) +} + +// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) { + f.VarP(newTextValue(value, p), name, shorthand, usage) +} + +// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p. +func TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) { + CommandLine.VarP(newTextValue(value, p), name, "", usage) +} + +// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash. +func TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) { + CommandLine.VarP(newTextValue(value, p), name, shorthand, usage) +} diff --git a/hack/tools/vendor/github.com/spf13/pflag/time.go b/hack/tools/vendor/github.com/spf13/pflag/time.go new file mode 100644 index 0000000000..3dee424791 --- /dev/null +++ b/hack/tools/vendor/github.com/spf13/pflag/time.go @@ -0,0 +1,124 @@ +package pflag + +import ( + "fmt" + "strings" + "time" +) + +// TimeValue adapts time.Time for use as a flag. +type timeValue struct { + *time.Time + formats []string +} + +func newTimeValue(val time.Time, p *time.Time, formats []string) *timeValue { + *p = val + return &timeValue{ + Time: p, + formats: formats, + } +} + +// Set time.Time value from string based on accepted formats. +func (d *timeValue) Set(s string) error { + s = strings.TrimSpace(s) + for _, f := range d.formats { + v, err := time.Parse(f, s) + if err != nil { + continue + } + *d.Time = v + return nil + } + + formatsString := "" + for i, f := range d.formats { + if i > 0 { + formatsString += ", " + } + formatsString += fmt.Sprintf("`%s`", f) + } + + return fmt.Errorf("invalid time format `%s` must be one of: %s", s, formatsString) +} + +// Type name for time.Time flags. 
+func (d *timeValue) Type() string { + return "time" +} + +func (d *timeValue) String() string { + if d.Time.IsZero() { + return "" + } else { + return d.Time.Format(time.RFC3339Nano) + } +} + +// GetTime return the time value of a flag with the given name +func (f *FlagSet) GetTime(name string) (time.Time, error) { + flag := f.Lookup(name) + if flag == nil { + err := fmt.Errorf("flag accessed but not defined: %s", name) + return time.Time{}, err + } + + if flag.Value.Type() != "time" { + err := fmt.Errorf("trying to get %s value of flag of type %s", "time", flag.Value.Type()) + return time.Time{}, err + } + + val, ok := flag.Value.(*timeValue) + if !ok { + return time.Time{}, fmt.Errorf("value %s is not a time", flag.Value) + } + + return *val.Time, nil +} + +// TimeVar defines a time.Time flag with specified name, default value, and usage string. +// The argument p points to a time.Time variable in which to store the value of the flag. +func (f *FlagSet) TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) { + f.TimeVarP(p, name, "", value, formats, usage) +} + +// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) { + f.VarP(newTimeValue(value, p, formats), name, shorthand, usage) +} + +// TimeVar defines a time.Time flag with specified name, default value, and usage string. +// The argument p points to a time.Time variable in which to store the value of the flag. +func TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) { + CommandLine.TimeVarP(p, name, "", value, formats, usage) +} + +// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash. +func TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) { + CommandLine.VarP(newTimeValue(value, p, formats), name, shorthand, usage) +} + +// Time defines a time.Time flag with specified name, default value, and usage string. +// The return value is the address of a time.Time variable that stores the value of the flag. +func (f *FlagSet) Time(name string, value time.Time, formats []string, usage string) *time.Time { + return f.TimeP(name, "", value, formats, usage) +} + +// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time { + p := new(time.Time) + f.TimeVarP(p, name, shorthand, value, formats, usage) + return p +} + +// Time defines a time.Time flag with specified name, default value, and usage string. +// The return value is the address of a time.Time variable that stores the value of the flag. +func Time(name string, value time.Time, formats []string, usage string) *time.Time { + return CommandLine.TimeP(name, "", value, formats, usage) +} + +// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash. 
+func TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time { + return CommandLine.TimeP(name, shorthand, value, formats, usage) +} diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/.codecov.yml b/hack/tools/vendor/go.uber.org/automaxprocs/.codecov.yml deleted file mode 100644 index 9a2ed4a996..0000000000 --- a/hack/tools/vendor/go.uber.org/automaxprocs/.codecov.yml +++ /dev/null @@ -1,14 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 90% # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/.gitignore b/hack/tools/vendor/go.uber.org/automaxprocs/.gitignore deleted file mode 100644 index dd7bcf5130..0000000000 --- a/hack/tools/vendor/go.uber.org/automaxprocs/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -vendor - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -*.pprof -*.out -*.log -coverage.txt - -/bin -cover.out -cover.html diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/CHANGELOG.md b/hack/tools/vendor/go.uber.org/automaxprocs/CHANGELOG.md deleted file mode 100644 index f421056ae8..0000000000 --- a/hack/tools/vendor/go.uber.org/automaxprocs/CHANGELOG.md +++ /dev/null @@ -1,52 +0,0 @@ -# Changelog - -## v1.6.0 (2024-07-24) - -- Add RoundQuotaFunc option that allows configuration of rounding - behavior for floating point CPU quota. - -## v1.5.3 (2023-07-19) - -- Fix mountinfo parsing when super options have fields with spaces. -- Fix division by zero while parsing cgroups. - -## v1.5.2 (2023-03-16) - -- Support child control cgroups -- Fix file descriptor leak -- Update dependencies - -## v1.5.1 (2022-04-06) - -- Fix cgroups v2 mountpoint detection. - -## v1.5.0 (2022-04-05) - -- Add support for cgroups v2. - -Thanks to @emadolsky for their contribution to this release. - -## v1.4.0 (2021-02-01) - -- Support colons in cgroup names. -- Remove linters from runtime dependencies. - -## v1.3.0 (2020-01-23) - -- Migrate to Go modules. - -## v1.2.0 (2018-02-22) - -- Fixed quota clamping to always round down rather than up; Rather than - guaranteeing constant throttling at saturation, instead assume that the - fractional CPU was added as a hedge for factors outside of Go's scheduler. - -## v1.1.0 (2017-11-10) - -- Log the new value of `GOMAXPROCS` rather than the current value. -- Make logs more explicit about whether `GOMAXPROCS` was modified or not. -- Allow customization of the minimum `GOMAXPROCS`, and modify default from 2 to 1. - -## v1.0.0 (2017-08-09) - -- Initial release. 
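The new pflag files above (func.go, text.go, time.go) introduce callback-backed, encoding.TextMarshaler/TextUnmarshaler-backed, and time.Time-backed flag values. A minimal consumer sketch, assuming the vendored pflag from these hunks; the flag names and the choice of netip.Addr are illustrative only, not part of the change:

```go
package main

import (
	"fmt"
	"net/netip"
	"strings"
	"time"

	flag "github.com/spf13/pflag"
)

func main() {
	// Func: the callback runs once for every --label=KEY=VALUE occurrence.
	labels := map[string]string{}
	flag.Func("label", "key=value pair; may be repeated", func(s string) error {
		k, v, ok := strings.Cut(s, "=")
		if !ok {
			return fmt.Errorf("expected key=value, got %q", s)
		}
		labels[k] = v
		return nil
	})

	// TextVar: any encoding.TextMarshaler/TextUnmarshaler pair can back a
	// flag; netip.Addr is just a convenient standard-library example.
	var addr netip.Addr
	flag.TextVar(&addr, "addr", netip.MustParseAddr("127.0.0.1"), "listen address")

	// Time: the argument is parsed against the caller-supplied layouts.
	start := flag.Time("start", time.Time{}, []string{time.RFC3339}, "start time")

	flag.Parse()
	fmt.Println(labels, addr, *start)
}
```

Invoked as, say, `prog --label env=dev --addr ::1 --start 2024-01-02T15:04:05Z`, the callback fires once per --label occurrence, while addr and start are filled in via UnmarshalText and the supplied time layouts respectively.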
diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md b/hack/tools/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md deleted file mode 100644 index e327d9aa5c..0000000000 --- a/hack/tools/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,75 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, -body size, disability, ethnicity, gender identity and expression, level of -experience, nationality, personal appearance, race, religion, or sexual -identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an -appointed representative at an online or offline event. Representation of a -project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at oss-conduct@uber.com. The project -team will review and investigate all complaints, and will respond in a way -that it deems appropriate to the circumstances. The project team is obligated -to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 1.4, available at -[http://contributor-covenant.org/version/1/4][version]. 
- -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md b/hack/tools/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md deleted file mode 100644 index 2b6a6040d7..0000000000 --- a/hack/tools/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md +++ /dev/null @@ -1,81 +0,0 @@ -# Contributing - -We'd love your help improving this package! - -If you'd like to add new exported APIs, please [open an issue][open-issue] -describing your proposal — discussing API changes ahead of time makes -pull request review much smoother. In your issue, pull request, and any other -communications, please remember to treat your fellow contributors with -respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. - -Note that you'll need to sign [Uber's Contributor License Agreement][cla] -before we can accept any of your contributions. If necessary, a bot will remind -you to accept the CLA when you open your pull request. - -## Setup - -[Fork][fork], then clone the repository: - -``` -mkdir -p $GOPATH/src/go.uber.org -cd $GOPATH/src/go.uber.org -git clone git@github.com:your_github_username/automaxprocs.git -cd automaxprocs -git remote add upstream https://github.com/uber-go/automaxprocs.git -git fetch upstream -``` - -Install the test dependencies: - -``` -make dependencies -``` - -Make sure that the tests and the linters pass: - -``` -make test -make lint -``` - -If you're not using the minor version of Go specified in the Makefile's -`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is -fine, but it means that you'll only discover lint failures after you open your -pull request. - -## Making Changes - -Start by creating a new branch for your changes: - -``` -cd $GOPATH/src/go.uber.org/automaxprocs -git checkout master -git fetch upstream -git rebase upstream/master -git checkout -b cool_new_feature -``` - -Make your changes, then ensure that `make lint` and `make test` still pass. If -you're satisfied with your changes, push them to your fork. - -``` -git push origin cool_new_feature -``` - -Then use the GitHub UI to open a pull request. - -At this point, you're waiting on us to review your changes. We *try* to respond -to issues and pull requests within a few business days, and we may suggest some -improvements or alternatives. Once your changes are approved, one of the -project maintainers will merge them. - -We're much more likely to approve your changes if you: - -* Add tests for new functionality. -* Write a [good commit message][commit-message]. -* Maintain backward compatibility. - -[fork]: https://github.com/uber-go/automaxprocs/fork -[open-issue]: https://github.com/uber-go/automaxprocs/issues/new -[cla]: https://cla-assistant.io/uber-go/automaxprocs -[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/LICENSE b/hack/tools/vendor/go.uber.org/automaxprocs/LICENSE deleted file mode 100644 index 20dcf51d96..0000000000 --- a/hack/tools/vendor/go.uber.org/automaxprocs/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2017 Uber Technologies, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/Makefile b/hack/tools/vendor/go.uber.org/automaxprocs/Makefile deleted file mode 100644 index 1642b71480..0000000000 --- a/hack/tools/vendor/go.uber.org/automaxprocs/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -export GOBIN ?= $(shell pwd)/bin - -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) - -GOLINT = $(GOBIN)/golint -STATICCHECK = $(GOBIN)/staticcheck - -.PHONY: build -build: - go build ./... - -.PHONY: install -install: - go mod download - -.PHONY: test -test: - go test -race ./... - -.PHONY: cover -cover: - go test -coverprofile=cover.out -covermode=atomic -coverpkg=./... ./... - go tool cover -html=cover.out -o cover.html - -$(GOLINT): tools/go.mod - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): tools/go.mod - cd tools && go install honnef.co/go/tools/cmd/staticcheck@2023.1.2 - -.PHONY: lint -lint: $(GOLINT) $(STATICCHECK) - @rm -rf lint.log - @echo "Checking gofmt" - @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log - @echo "Checking go vet" - @go vet ./... 2>&1 | tee -a lint.log - @echo "Checking golint" - @$(GOLINT) ./... | tee -a lint.log - @echo "Checking staticcheck" - @$(STATICCHECK) ./... 2>&1 | tee -a lint.log - @echo "Checking for license headers..." - @./.build/check_license.sh | tee -a lint.log - @[ ! -s lint.log ] diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/README.md b/hack/tools/vendor/go.uber.org/automaxprocs/README.md deleted file mode 100644 index bfed32adae..0000000000 --- a/hack/tools/vendor/go.uber.org/automaxprocs/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# automaxprocs [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -Automatically set `GOMAXPROCS` to match Linux container CPU quota. - -## Installation - -`go get -u go.uber.org/automaxprocs` - -## Quick Start - -```go -import _ "go.uber.org/automaxprocs" - -func main() { - // Your application logic here. -} -``` - -# Performance -Data measured from Uber's internal load balancer. 
We ran the load balancer with 200% CPU quota (i.e., 2 cores): - -| GOMAXPROCS | RPS | P50 (ms) | P99.9 (ms) | -| ------------------ | --------- | -------- | ---------- | -| 1 | 28,893.18 | 1.46 | 19.70 | -| 2 (equal to quota) | 44,715.07 | 0.84 | 26.38 | -| 3 | 44,212.93 | 0.66 | 30.07 | -| 4 | 41,071.15 | 0.57 | 42.94 | -| 8 | 33,111.69 | 0.43 | 64.32 | -| Default (24) | 22,191.40 | 0.45 | 76.19 | - -When `GOMAXPROCS` is increased above the CPU quota, we see P50 decrease slightly, but see significant increases to P99. We also see that the total RPS handled also decreases. - -When `GOMAXPROCS` is higher than the CPU quota allocated, we also saw significant throttling: - -``` -$ cat /sys/fs/cgroup/cpu,cpuacct/system.slice/[...]/cpu.stat -nr_periods 42227334 -nr_throttled 131923 -throttled_time 88613212216618 -``` - -Once `GOMAXPROCS` was reduced to match the CPU quota, we saw no CPU throttling. - -## Development Status: Stable - -All APIs are finalized, and no breaking changes will be made in the 1.x series -of releases. Users of semver-aware dependency management systems should pin -automaxprocs to `^1`. - -## Contributing - -We encourage and support an active, healthy community of contributors — -including you! Details are in the [contribution guide](CONTRIBUTING.md) and -the [code of conduct](CODE_OF_CONDUCT.md). The automaxprocs maintainers keep -an eye on issues and pull requests, but you can also report any negative -conduct to oss-conduct@uber.com. That email list is a private, safe space; -even the automaxprocs maintainers don't have access, so don't hesitate to hold -us to a high standard. - -
- -Released under the [MIT License](LICENSE). - -[doc-img]: https://godoc.org/go.uber.org/automaxprocs?status.svg -[doc]: https://godoc.org/go.uber.org/automaxprocs -[ci-img]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml/badge.svg -[ci]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml -[cov-img]: https://codecov.io/gh/uber-go/automaxprocs/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/automaxprocs - - diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/automaxprocs.go b/hack/tools/vendor/go.uber.org/automaxprocs/automaxprocs.go deleted file mode 100644 index 69946a3e1f..0000000000 --- a/hack/tools/vendor/go.uber.org/automaxprocs/automaxprocs.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package automaxprocs automatically sets GOMAXPROCS to match the Linux -// container CPU quota, if any. -package automaxprocs // import "go.uber.org/automaxprocs" - -import ( - "log" - - "go.uber.org/automaxprocs/maxprocs" -) - -func init() { - maxprocs.Set(maxprocs.Logger(log.Printf)) -} diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go b/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go deleted file mode 100644 index 113555f63d..0000000000 --- a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package cgroups provides utilities to access Linux control group (CGroups) -// parameters (CPU quota, for example) for a given process. -package cgroups diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/hack/tools/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go deleted file mode 100644 index e561fe60b2..0000000000 --- a/hack/tools/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to -// match the configured Linux CPU quota. Unlike the top-level automaxprocs -// package, it lets the caller configure logging and handle errors. -package maxprocs // import "go.uber.org/automaxprocs/maxprocs" - -import ( - "os" - "runtime" - - iruntime "go.uber.org/automaxprocs/internal/runtime" -) - -const _maxProcsKey = "GOMAXPROCS" - -func currentMaxProcs() int { - return runtime.GOMAXPROCS(0) -} - -type config struct { - printf func(string, ...interface{}) - procs func(int, func(v float64) int) (int, iruntime.CPUQuotaStatus, error) - minGOMAXPROCS int - roundQuotaFunc func(v float64) int -} - -func (c *config) log(fmt string, args ...interface{}) { - if c.printf != nil { - c.printf(fmt, args...) - } -} - -// An Option alters the behavior of Set. -type Option interface { - apply(*config) -} - -// Logger uses the supplied printf implementation for log output. By default, -// Set doesn't log anything. -func Logger(printf func(string, ...interface{})) Option { - return optionFunc(func(cfg *config) { - cfg.printf = printf - }) -} - -// Min sets the minimum GOMAXPROCS value that will be used. -// Any value below 1 is ignored. -func Min(n int) Option { - return optionFunc(func(cfg *config) { - if n >= 1 { - cfg.minGOMAXPROCS = n - } - }) -} - -// RoundQuotaFunc sets the function that will be used to covert the CPU quota from float to int. 
-func RoundQuotaFunc(rf func(v float64) int) Option { - return optionFunc(func(cfg *config) { - cfg.roundQuotaFunc = rf - }) -} - -type optionFunc func(*config) - -func (of optionFunc) apply(cfg *config) { of(cfg) } - -// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning -// any error encountered and an undo function. -// -// Set is a no-op on non-Linux systems and in Linux environments without a -// configured CPU quota. -func Set(opts ...Option) (func(), error) { - cfg := &config{ - procs: iruntime.CPUQuotaToGOMAXPROCS, - roundQuotaFunc: iruntime.DefaultRoundFunc, - minGOMAXPROCS: 1, - } - for _, o := range opts { - o.apply(cfg) - } - - undoNoop := func() { - cfg.log("maxprocs: No GOMAXPROCS change to reset") - } - - // Honor the GOMAXPROCS environment variable if present. Otherwise, amend - // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is - // Linux, and guarantee a minimum value of 1. The minimum guaranteed value - // can be overridden using `maxprocs.Min()`. - if max, exists := os.LookupEnv(_maxProcsKey); exists { - cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max) - return undoNoop, nil - } - - maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc) - if err != nil { - return undoNoop, err - } - - if status == iruntime.CPUQuotaUndefined { - cfg.log("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", currentMaxProcs()) - return undoNoop, nil - } - - prev := currentMaxProcs() - undo := func() { - cfg.log("maxprocs: Resetting GOMAXPROCS to %v", prev) - runtime.GOMAXPROCS(prev) - } - - switch status { - case iruntime.CPUQuotaMinUsed: - cfg.log("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs) - case iruntime.CPUQuotaUsed: - cfg.log("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs) - } - - runtime.GOMAXPROCS(maxProcs) - return undo, nil -} diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/hack/tools/vendor/go.uber.org/automaxprocs/maxprocs/version.go deleted file mode 100644 index cc7fc5aee1..0000000000 --- a/hack/tools/vendor/go.uber.org/automaxprocs/maxprocs/version.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package maxprocs - -// Version is the current package version. 
-const Version = "1.6.0" diff --git a/hack/tools/vendor/go.uber.org/mock/mockgen/archive.go b/hack/tools/vendor/go.uber.org/mock/mockgen/archive.go new file mode 100644 index 0000000000..bc80ad119b --- /dev/null +++ b/hack/tools/vendor/go.uber.org/mock/mockgen/archive.go @@ -0,0 +1,55 @@ +package main + +import ( + "fmt" + "go/token" + "go/types" + "os" + + "go.uber.org/mock/mockgen/model" + + "golang.org/x/tools/go/gcexportdata" +) + +func archiveMode(importPath string, symbols []string, archive string) (*model.Package, error) { + f, err := os.Open(archive) + if err != nil { + return nil, err + } + defer f.Close() + r, err := gcexportdata.NewReader(f) + if err != nil { + return nil, fmt.Errorf("read export data %q: %v", archive, err) + } + + fset := token.NewFileSet() + imports := make(map[string]*types.Package) + tp, err := gcexportdata.Read(r, fset, imports, importPath) + if err != nil { + return nil, err + } + + pkg := &model.Package{ + Name: tp.Name(), + PkgPath: tp.Path(), + Interfaces: make([]*model.Interface, 0, len(symbols)), + } + for _, name := range symbols { + m := tp.Scope().Lookup(name) + tn, ok := m.(*types.TypeName) + if !ok { + continue + } + ti, ok := tn.Type().Underlying().(*types.Interface) + if !ok { + continue + } + it, err := model.InterfaceFromGoTypesType(ti) + if err != nil { + return nil, err + } + it.Name = m.Name() + pkg.Interfaces = append(pkg.Interfaces, it) + } + return pkg, nil +} diff --git a/hack/tools/vendor/go.uber.org/mock/mockgen/mockgen.go b/hack/tools/vendor/go.uber.org/mock/mockgen/mockgen.go index 79cce84b34..d3d92ba16c 100644 --- a/hack/tools/vendor/go.uber.org/mock/mockgen/mockgen.go +++ b/hack/tools/vendor/go.uber.org/mock/mockgen/mockgen.go @@ -54,6 +54,7 @@ var ( ) var ( + archive = flag.String("archive", "", "(archive mode) Input Go archive file; enables archive mode.") source = flag.String("source", "", "(source mode) Input Go source file; enables source mode.") destination = flag.String("destination", "", "Output file; defaults to stdout.") mockNames = flag.String("mock_names", "", "Comma-separated interfaceName=mockName pairs of explicit mock names to use. 
Mock names default to 'Mock'+ interfaceName suffix.") @@ -68,11 +69,10 @@ var ( typed = flag.Bool("typed", false, "Generate Type-safe 'Return', 'Do', 'DoAndReturn' function") imports = flag.String("imports", "", "(source mode) Comma-separated name=path pairs of explicit imports to use.") auxFiles = flag.String("aux_files", "", "(source mode) Comma-separated pkg=path pairs of auxiliary Go source files.") - excludeInterfaces = flag.String("exclude_interfaces", "", "(source mode) Comma-separated names of interfaces to be excluded") modelGob = flag.String("model_gob", "", "Skip package/source loading entirely and use the gob encoded model.Package at the given path") - - debugParser = flag.Bool("debug_parser", false, "Print out parser results only.") - showVersion = flag.Bool("version", false, "Print version.") + excludeInterfaces = flag.String("exclude_interfaces", "", "Comma-separated names of interfaces to be excluded") + debugParser = flag.Bool("debug_parser", false, "Print out parser results only.") + showVersion = flag.Bool("version", false, "Print version.") ) func main() { @@ -89,17 +89,24 @@ func main() { var pkg *model.Package var err error var packageName string - if *modelGob != "" { + + // Switch between modes + switch { + case *modelGob != "": // gob mode pkg, err = gobMode(*modelGob) - } else if *source != "" { + case *source != "": // source mode pkg, err = sourceMode(*source) - } else { - if flag.NArg() != 2 { - usage() - log.Fatal("Expected exactly two arguments") - } + case *archive != "": // archive mode + checkArgs() + packageName = flag.Arg(0) + interfaces := strings.Split(flag.Arg(1), ",") + pkg, err = archiveMode(packageName, interfaces, *archive) + + default: // package mode + checkArgs() packageName = flag.Arg(0) interfaces := strings.Split(flag.Arg(1), ",") + if packageName == "." { dir, err := os.Getwd() if err != nil { @@ -109,10 +116,12 @@ func main() { if err != nil { log.Fatalf("Parse package name failed: %v", err) } + } parser := packageModeParser{} pkg, err = parser.parsePackage(packageName, interfaces) } + if err != nil { log.Fatalf("Loading input failed: %v", err) } @@ -155,6 +164,8 @@ func main() { } if *source != "" { g.filename = *source + } else if *archive != "" { + g.filename = *archive } else { g.srcPackage = packageName g.srcInterfaces = flag.Arg(1) @@ -230,12 +241,19 @@ func parseExcludeInterfaces(names string) map[string]struct{} { return namesSet } +func checkArgs() { + if flag.NArg() != 2 { + usage() + log.Fatal("Expected exactly two arguments") + } +} + func usage() { _, _ = io.WriteString(os.Stderr, usageText) flag.PrintDefaults() } -const usageText = `mockgen has two modes of operation: source and package. +const usageText = `mockgen has three modes of operation: archive, source and package. Source mode generates mock interfaces from a source file. It is enabled by using the -source flag. Other flags that @@ -245,12 +263,19 @@ Example: Package mode works by specifying the package and interface names. It is enabled by passing two non-flag arguments: an import path, and a -comma-separated list of symbols. +comma-separated list of symbols. You can use "." to refer to the current path's package. Example: mockgen database/sql/driver Conn,Driver mockgen . SomeInterface +Archive mode generates mock interfaces from a package archive +file (.a). It is enabled by using the -archive flag and two +non-flag arguments: an import path, and a comma-separated +list of symbols. 
+Example: + mockgen -archive=pkg.a database/sql/driver Conn,Driver + ` type generator struct { diff --git a/hack/tools/vendor/go.uber.org/mock/mockgen/model/model_gotypes.go b/hack/tools/vendor/go.uber.org/mock/mockgen/model/model_gotypes.go new file mode 100644 index 0000000000..4596c3d28d --- /dev/null +++ b/hack/tools/vendor/go.uber.org/mock/mockgen/model/model_gotypes.go @@ -0,0 +1,160 @@ +package model + +import ( + "fmt" + "go/types" +) + +// InterfaceFromGoTypesType returns a pointer to an interface for the +// given interface type loaded from archive. +func InterfaceFromGoTypesType(it *types.Interface) (*Interface, error) { + intf := &Interface{} + + for i := 0; i < it.NumMethods(); i++ { + mt := it.Method(i) + // Skip unexported methods. + if !mt.Exported() { + continue + } + m := &Method{ + Name: mt.Name(), + } + + var err error + m.In, m.Variadic, m.Out, err = funcArgsFromGoTypesType(mt.Type().(*types.Signature)) + if err != nil { + return nil, fmt.Errorf("method %q: %w", mt.Name(), err) + } + + intf.AddMethod(m) + } + + return intf, nil +} + +func funcArgsFromGoTypesType(t *types.Signature) (in []*Parameter, variadic *Parameter, out []*Parameter, err error) { + nin := t.Params().Len() + if t.Variadic() { + nin-- + } + for i := 0; i < nin; i++ { + p, err := parameterFromGoTypesType(t.Params().At(i), false) + if err != nil { + return nil, nil, nil, err + } + in = append(in, p) + } + if t.Variadic() { + p, err := parameterFromGoTypesType(t.Params().At(nin), true) + if err != nil { + return nil, nil, nil, err + } + variadic = p + } + for i := 0; i < t.Results().Len(); i++ { + p, err := parameterFromGoTypesType(t.Results().At(i), false) + if err != nil { + return nil, nil, nil, err + } + out = append(out, p) + } + return +} + +func parameterFromGoTypesType(v *types.Var, variadic bool) (*Parameter, error) { + t := v.Type() + if variadic { + t = t.(*types.Slice).Elem() + } + tt, err := typeFromGoTypesType(t) + if err != nil { + return nil, err + } + return &Parameter{Name: v.Name(), Type: tt}, nil +} + +func typeFromGoTypesType(t types.Type) (Type, error) { + if t, ok := t.(*types.Named); ok { + tn := t.Obj() + if tn.Pkg() == nil { + return PredeclaredType(tn.Name()), nil + } + return &NamedType{ + Package: tn.Pkg().Path(), + Type: tn.Name(), + }, nil + } + + // only unnamed or predeclared types after here + + // Lots of types have element types. Let's do the parsing and error checking for all of them. 
+ var elemType Type + if t, ok := t.(interface{ Elem() types.Type }); ok { + var err error + elemType, err = typeFromGoTypesType(t.Elem()) + if err != nil { + return nil, err + } + } + + switch t := t.(type) { + case *types.Array: + return &ArrayType{ + Len: int(t.Len()), + Type: elemType, + }, nil + case *types.Basic: + return PredeclaredType(t.String()), nil + case *types.Chan: + var dir ChanDir + switch t.Dir() { + case types.RecvOnly: + dir = RecvDir + case types.SendOnly: + dir = SendDir + } + return &ChanType{ + Dir: dir, + Type: elemType, + }, nil + case *types.Signature: + in, variadic, out, err := funcArgsFromGoTypesType(t) + if err != nil { + return nil, err + } + return &FuncType{ + In: in, + Out: out, + Variadic: variadic, + }, nil + case *types.Interface: + if t.NumMethods() == 0 { + return PredeclaredType("interface{}"), nil + } + case *types.Map: + kt, err := typeFromGoTypesType(t.Key()) + if err != nil { + return nil, err + } + return &MapType{ + Key: kt, + Value: elemType, + }, nil + case *types.Pointer: + return &PointerType{ + Type: elemType, + }, nil + case *types.Slice: + return &ArrayType{ + Len: -1, + Type: elemType, + }, nil + case *types.Struct: + if t.NumFields() == 0 { + return PredeclaredType("struct{}"), nil + } + // TODO: UnsafePointer + } + + return nil, fmt.Errorf("can't yet turn %v (%T) into a model.Type", t.String(), t) +} diff --git a/hack/tools/vendor/go.yaml.in/yaml/v2/.travis.yml b/hack/tools/vendor/go.yaml.in/yaml/v2/.travis.yml new file mode 100644 index 0000000000..7348c50c0c --- /dev/null +++ b/hack/tools/vendor/go.yaml.in/yaml/v2/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/hack/tools/vendor/go.yaml.in/yaml/v2/LICENSE similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE rename to hack/tools/vendor/go.yaml.in/yaml/v2/LICENSE diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/hack/tools/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml rename to hack/tools/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/hack/tools/vendor/go.yaml.in/yaml/v2/NOTICE similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE rename to hack/tools/vendor/go.yaml.in/yaml/v2/NOTICE diff --git a/hack/tools/vendor/go.yaml.in/yaml/v2/README.md b/hack/tools/vendor/go.yaml.in/yaml/v2/README.md new file mode 100644 index 0000000000..c9388da425 --- /dev/null +++ b/hack/tools/vendor/go.yaml.in/yaml/v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. 
Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *go.yaml.in/yaml/v2*. + +To install it, run: + + go get go.yaml.in/yaml/v2 + +API documentation +----------------- + +See: <https://pkg.go.dev/go.yaml.in/yaml/v2> + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy!
+b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/hack/tools/vendor/go.yaml.in/yaml/v2/apic.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/apic.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/hack/tools/vendor/go.yaml.in/yaml/v2/decode.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/decode.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/hack/tools/vendor/go.yaml.in/yaml/v2/emitterc.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/emitterc.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/hack/tools/vendor/go.yaml.in/yaml/v2/encode.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/encode.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/hack/tools/vendor/go.yaml.in/yaml/v2/parserc.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/parserc.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/hack/tools/vendor/go.yaml.in/yaml/v2/readerc.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/readerc.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/hack/tools/vendor/go.yaml.in/yaml/v2/resolve.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/resolve.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/hack/tools/vendor/go.yaml.in/yaml/v2/scannerc.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/scannerc.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/hack/tools/vendor/go.yaml.in/yaml/v2/sorter.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/sorter.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/hack/tools/vendor/go.yaml.in/yaml/v2/writerc.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/writerc.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/hack/tools/vendor/go.yaml.in/yaml/v2/yaml.go similarity index 99% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/yaml.go index 30813884c0..5248e1263c 100644 --- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go +++ b/hack/tools/vendor/go.yaml.in/yaml/v2/yaml.go @@ -2,7 +2,7 @@ // // Source code and other details for the project are available at GitHub: // -// https://github.com/go-yaml/yaml +// https://github.com/yaml/go-yaml // package yaml diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/hack/tools/vendor/go.yaml.in/yaml/v2/yamlh.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/yamlh.go diff --git 
a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/hack/tools/vendor/go.yaml.in/yaml/v2/yamlprivateh.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go rename to hack/tools/vendor/go.yaml.in/yaml/v2/yamlprivateh.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/LICENSE b/hack/tools/vendor/go.yaml.in/yaml/v3/LICENSE similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/LICENSE rename to hack/tools/vendor/go.yaml.in/yaml/v3/LICENSE diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/NOTICE b/hack/tools/vendor/go.yaml.in/yaml/v3/NOTICE similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/NOTICE rename to hack/tools/vendor/go.yaml.in/yaml/v3/NOTICE diff --git a/hack/tools/vendor/go.yaml.in/yaml/v3/README.md b/hack/tools/vendor/go.yaml.in/yaml/v3/README.md new file mode 100644 index 0000000000..15a85a6350 --- /dev/null +++ b/hack/tools/vendor/go.yaml.in/yaml/v3/README.md @@ -0,0 +1,171 @@ +go.yaml.in/yaml +=============== + +YAML Support for the Go Language + + +## Introduction + +The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode +and decode [YAML](https://yaml.org/) values. + +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. + + +## Project Status + +This project started as a fork of the extremely popular [go-yaml]( +https://github.com/go-yaml/yaml/) +project, and is being maintained by the official [YAML organization]( +https://github.com/yaml/). + +The YAML team took over ongoing maintenance and development of the project after +discussion with go-yaml's author, @niemeyer, following his decision to +[label the project repository as "unmaintained"]( +https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025. + +We have put together a team of dedicated maintainers including representatives +of go-yaml's most important downstream projects. + +We will strive to earn the trust of the various go-yaml forks to switch back to +this repository as their upstream. + +Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you +would like to contribute or be involved. + + +## Compatibility + +The `yaml` package supports most of YAML 1.2, but preserves some behavior from +1.1 for backwards compatibility. + +Specifically, v3 of the `yaml` package: + +* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being + decoded into a typed bool value. + Otherwise they behave as a string. + Booleans in YAML 1.2 are `true`/`false` only. +* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than + `0o777` as specified in YAML 1.2, because most parsers still use the old + format. + Octals in the `0o777` format are supported though, so new files work. +* Does not support base-60 floats. + These are gone from YAML 1.2, and were actually never supported by this + package as it's clearly a poor choice. + + +## Installation and Usage + +The import path for the package is *go.yaml.in/yaml/v3*. + +To install it, run: + +```bash +go get go.yaml.in/yaml/v3 +``` + + +## API Documentation + +See: <https://pkg.go.dev/go.yaml.in/yaml/v3> + + +## API Stability + +The package API for yaml v3 will remain stable as described in [gopkg.in]( +https://gopkg.in).
+
+
+## Example
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"go.yaml.in/yaml/v3"
+)
+
+var data = `
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+	A string
+	B struct {
+		RenamedC int   `yaml:"c"`
+		D        []int `yaml:",flow"`
+	}
+}
+
+func main() {
+	t := T{}
+
+	err := yaml.Unmarshal([]byte(data), &t)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- t:\n%v\n\n", t)
+
+	d, err := yaml.Marshal(&t)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+	m := make(map[interface{}]interface{})
+
+	err = yaml.Unmarshal([]byte(data), &m)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- m:\n%v\n\n", m)
+
+	d, err = yaml.Marshal(&m)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+    c: 2
+    d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+    c: 2
+    d:
+        - 3
+        - 4
+```
+
+
+## License
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go b/hack/tools/vendor/go.yaml.in/yaml/v3/apic.go
similarity index 99%
rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go
rename to hack/tools/vendor/go.yaml.in/yaml/v3/apic.go
index ae7d049f18..05fd305da1 100644
--- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go
+++ b/hack/tools/vendor/go.yaml.in/yaml/v3/apic.go
@@ -1,17 +1,17 @@
-// 
+//
 // Copyright (c) 2011-2019 Canonical Ltd
 // Copyright (c) 2006-2010 Kirill Simonov
-// 
+//
 // Permission is hereby granted, free of charge, to any person obtaining a copy of
 // this software and associated documentation files (the "Software"), to deal in
 // the Software without restriction, including without limitation the rights to
 // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 // of the Software, and to permit persons to whom the Software is furnished to do
 // so, subject to the following conditions:
-// 
+//
 // The above copyright notice and this permission notice shall be included in all
 // copies or substantial portions of the Software.
-// 
+//
 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/decode.go b/hack/tools/vendor/go.yaml.in/yaml/v3/decode.go similarity index 97% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/decode.go rename to hack/tools/vendor/go.yaml.in/yaml/v3/decode.go index 0173b6982e..02e2b17bfe 100644 --- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/decode.go +++ b/hack/tools/vendor/go.yaml.in/yaml/v3/decode.go @@ -832,10 +832,10 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { if d.unmarshal(n.Content[i], k) { if mergedFields != nil { ki := k.Interface() - if mergedFields[ki] { + if d.getPossiblyUnhashableKey(mergedFields, ki) { continue } - mergedFields[ki] = true + d.setPossiblyUnhashableKey(mergedFields, ki, true) } kkind := k.Kind() if kkind == reflect.Interface { @@ -956,6 +956,24 @@ func failWantMap() { failf("map merge requires map or sequence of maps as the value") } +func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { mergedFields := d.mergedFields if mergedFields == nil { @@ -963,7 +981,7 @@ func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { for i := 0; i < len(parent.Content); i += 2 { k := reflect.New(ifaceType).Elem() if d.unmarshal(parent.Content[i], k) { - d.mergedFields[k.Interface()] = true + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) } } } diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go b/hack/tools/vendor/go.yaml.in/yaml/v3/emitterc.go similarity index 98% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go rename to hack/tools/vendor/go.yaml.in/yaml/v3/emitterc.go index 6ea0ae8c10..ab4e03ba72 100644 --- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go +++ b/hack/tools/vendor/go.yaml.in/yaml/v3/emitterc.go @@ -162,10 +162,9 @@ func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { // Check if we need to accumulate more events before emitting. 
// // We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { if emitter.events_head == len(emitter.events) { return true @@ -485,6 +484,18 @@ func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") } +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} + // Expect the root node. func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/encode.go b/hack/tools/vendor/go.yaml.in/yaml/v3/encode.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/encode.go rename to hack/tools/vendor/go.yaml.in/yaml/v3/encode.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go b/hack/tools/vendor/go.yaml.in/yaml/v3/parserc.go similarity index 93% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go rename to hack/tools/vendor/go.yaml.in/yaml/v3/parserc.go index 268558a0d6..25fe823637 100644 --- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go +++ b/hack/tools/vendor/go.yaml.in/yaml/v3/parserc.go @@ -227,7 +227,8 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool // Parse the production: // stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ +// +// ************ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -249,9 +250,12 @@ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* -// * +// +// * +// // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* +// +// ************************* func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { token := peek_token(parser) @@ -356,8 +360,8 @@ func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t // Parse the productions: // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// *********** // +// *********** func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -379,9 +383,10 @@ func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* // +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -428,30 +433,41 @@ func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) // Parse the productions: // block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// // block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// // flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// // properties ::= TAG ANCHOR? | ANCHOR TAG? -// ************************* +// +// ************************* +// // block_content ::= block_collection | flow_collection | SCALAR -// ****** +// +// ****** +// // flow_content ::= flow_collection | SCALAR -// ****** +// +// ****** func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() @@ -682,8 +698,8 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i // Parse the productions: // block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* // +// ******************** *********** * ********* func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -740,7 +756,8 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e // Parse the productions: // indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * +// +// *********** * func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -805,14 +822,14 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* // -// BLOCK-END -// ********* +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* // +// BLOCK-END +// ********* func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -881,13 +898,11 @@ func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_even // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// +// ((KEY block_node_or_indentless_sequence?)? 
// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -915,16 +930,18 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev // Parse the productions: // flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * // +// * func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -987,11 +1004,10 @@ func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_ev return true } -// // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * // +// *** * func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1011,8 +1027,8 @@ func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, ev // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * // +// ***** * func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1035,8 +1051,8 @@ func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * // +// * func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1053,16 +1069,17 @@ func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, ev // Parse the productions: // flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * // +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - *** * func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -1128,8 +1145,7 @@ func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event // Parse the productions: // flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * ***** * -// +// - ***** * func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { token := peek_token(parser) if token == nil { diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go b/hack/tools/vendor/go.yaml.in/yaml/v3/readerc.go similarity index 99% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go rename to hack/tools/vendor/go.yaml.in/yaml/v3/readerc.go index b7de0a89c4..56af245366 100644 --- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go +++ b/hack/tools/vendor/go.yaml.in/yaml/v3/readerc.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/resolve.go b/hack/tools/vendor/go.yaml.in/yaml/v3/resolve.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/resolve.go rename to hack/tools/vendor/go.yaml.in/yaml/v3/resolve.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go b/hack/tools/vendor/go.yaml.in/yaml/v3/scannerc.go similarity index 99% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go rename to hack/tools/vendor/go.yaml.in/yaml/v3/scannerc.go index ca0070108f..30b1f08920 100644 --- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go +++ b/hack/tools/vendor/go.yaml.in/yaml/v3/scannerc.go @@ -1614,11 +1614,11 @@ func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { // Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { // Eat '%'. start_mark := parser.mark @@ -1719,11 +1719,11 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool // Scan the directive name. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ // +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { // Consume the directive name. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -1758,8 +1758,9 @@ func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark // Scan the value of VERSION-DIRECTIVE. 
// // Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ +// +// %YAML 1.1 # a comment \n +// ^^^^^^ func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { // Eat whitespaces. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -1797,10 +1798,11 @@ const max_number_length = 2 // Scan the version number of VERSION-DIRECTIVE. // // Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { // Repeat while the next character is digit. @@ -1834,9 +1836,9 @@ func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark // Scan the value of a TAG-DIRECTIVE token. // // Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { var handle_value, prefix_value []byte @@ -2847,7 +2849,7 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t continue } if parser.buffer[parser.buffer_pos+peek] == '#' { - seen := parser.mark.index+peek + seen := parser.mark.index + peek for { if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false @@ -2876,7 +2878,7 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t parser.comments = append(parser.comments, yaml_comment_t{ token_mark: token_mark, start_mark: start_mark, - line: text, + line: text, }) } return true @@ -2910,7 +2912,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo // the foot is the line below it. var foot_line = -1 if scan_mark.line > 0 { - foot_line = parser.mark.line-parser.newlines+1 + foot_line = parser.mark.line - parser.newlines + 1 if parser.newlines == 0 && parser.mark.column > 1 { foot_line++ } @@ -2996,7 +2998,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo recent_empty = false // Consume until after the consumed comment line. 
- seen := parser.mark.index+peek + seen := parser.mark.index + peek for { if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/sorter.go b/hack/tools/vendor/go.yaml.in/yaml/v3/sorter.go similarity index 100% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/sorter.go rename to hack/tools/vendor/go.yaml.in/yaml/v3/sorter.go diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go b/hack/tools/vendor/go.yaml.in/yaml/v3/writerc.go similarity index 99% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go rename to hack/tools/vendor/go.yaml.in/yaml/v3/writerc.go index b8a116bf9a..266d0b092c 100644 --- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go +++ b/hack/tools/vendor/go.yaml.in/yaml/v3/writerc.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml.go b/hack/tools/vendor/go.yaml.in/yaml/v3/yaml.go similarity index 91% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml.go rename to hack/tools/vendor/go.yaml.in/yaml/v3/yaml.go index 8cec6da48d..0b101cd20d 100644 --- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml.go +++ b/hack/tools/vendor/go.yaml.in/yaml/v3/yaml.go @@ -17,8 +17,7 @@ // // Source code and other details for the project are available at GitHub: // -// https://github.com/go-yaml/yaml -// +// https://github.com/yaml/go-yaml package yaml import ( @@ -75,16 +74,15 @@ type Marshaler interface { // // For example: // -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) // // See the documentation of Marshal for the format of tags and a list of // supported tag options. -// func Unmarshal(in []byte, out interface{}) (err error) { return unmarshal(in, out, false) } @@ -185,36 +183,35 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // // The field tag format accepted is: // -// `(...) yaml:"[][,[,]]" (...)` +// `(...) yaml:"[][,[,]]" (...)` // // The following flags are currently supported: // -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be excluded if IsZero returns true. 
+// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. // -// flow Marshal using a flow style (useful for structs, -// sequences and maps). +// flow Marshal using a flow style (useful for structs, +// sequences and maps). // -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. // // In addition, if the key is "-", the field is ignored. // // For example: // -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" func Marshal(in interface{}) (out []byte, err error) { defer handleErr(&err) e := newEncoder() @@ -278,6 +275,16 @@ func (e *Encoder) SetIndent(spaces int) { e.encoder.indent = spaces } +// CompactSeqIndent makes it so that '- ' is considered part of the indentation. +func (e *Encoder) CompactSeqIndent() { + e.encoder.emitter.compact_sequence_indent = true +} + +// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation. +func (e *Encoder) DefaultSeqIndent() { + e.encoder.emitter.compact_sequence_indent = false +} + // Close closes the encoder by writing any remaining data. // It does not write a stream terminating string "...". func (e *Encoder) Close() (err error) { @@ -358,22 +365,21 @@ const ( // // For example: // -// var person struct { -// Name string -// Address yaml.Node -// } -// err := yaml.Unmarshal(data, &person) -// -// Or by itself: +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) // -// var person Node -// err := yaml.Unmarshal(data, &person) +// Or by itself: // +// var person Node +// err := yaml.Unmarshal(data, &person) type Node struct { // Kind defines whether the node is a document, a mapping, a sequence, // a scalar value, or an alias to another node. The specific data type of // scalar nodes may be obtained via the ShortTag and LongTag methods. - Kind Kind + Kind Kind // Style allows customizing the apperance of the node in the tree. Style Style @@ -421,7 +427,6 @@ func (n *Node) IsZero() bool { n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 } - // LongTag returns the long form of the tag that indicates the data type for // the node. If the Tag field isn't explicitly defined, one will be computed // based on the node properties. 
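The yaml.go hunk above adds CompactSeqIndent and DefaultSeqIndent to the vendored encoder. A small sketch of what the toggle is expected to do to emitted block sequences (illustrative only, not from this diff; output shown under the stated SetIndent(2) assumption):

```go
package main

import (
	"bytes"
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	enc.SetIndent(2)
	// With compact sequence indentation, "- " counts as part of the
	// indent, so items sit directly under their key:
	//
	//   d:
	//   - 3
	//   - 4
	//
	// DefaultSeqIndent restores the stock behavior, where items are
	// indented one further level:
	//
	//   d:
	//     - 3
	//     - 4
	enc.CompactSeqIndent()
	if err := enc.Encode(map[string][]int{"d": {3, 4}}); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}
```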
diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go b/hack/tools/vendor/go.yaml.in/yaml/v3/yamlh.go similarity index 99% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go rename to hack/tools/vendor/go.yaml.in/yaml/v3/yamlh.go index 40c74de497..f59aa40f64 100644 --- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go +++ b/hack/tools/vendor/go.yaml.in/yaml/v3/yamlh.go @@ -438,7 +438,9 @@ type yaml_document_t struct { // The number of written bytes should be set to the size_read variable. // // [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). +// +// yaml_parser_set_input(). +// // [out] buffer The buffer to write the data from the source. // [in] size The size of the buffer. // [out] size_read The actual number of bytes read from the source. @@ -639,7 +641,6 @@ type yaml_parser_t struct { } type yaml_comment_t struct { - scan_mark yaml_mark_t // Position where scanning for comments started token_mark yaml_mark_t // Position after which tokens will be associated with this comment start_mark yaml_mark_t // Position of '#' comment mark @@ -659,13 +660,14 @@ type yaml_comment_t struct { // @a buffer to the output. // // @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). +// +// yaml_emitter_set_output(). +// // @param[in] buffer The buffer with bytes to be written. // @param[in] size The size of the buffer. // // @returns On success, the handler should return @c 1. If the handler failed, // the returned value should be @c 0. -// type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error type yaml_emitter_state_t int diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go b/hack/tools/vendor/go.yaml.in/yaml/v3/yamlprivateh.go similarity index 97% rename from hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go rename to hack/tools/vendor/go.yaml.in/yaml/v3/yamlprivateh.go index e88f9c54ae..dea1ba9610 100644 --- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go +++ b/hack/tools/vendor/go.yaml.in/yaml/v3/yamlprivateh.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE @@ -137,8 +137,8 @@ func is_crlf(b []byte, i int) bool { func is_breakz(b []byte, i int) bool { //return is_break(b, i) || is_z(b, i) return ( - // is_break: - b[i] == '\r' || // CR (#xD) + // is_break: + b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) @@ -151,8 +151,8 @@ func is_breakz(b []byte, i int) bool { func is_spacez(b []byte, i int) bool { //return is_space(b, i) || is_breakz(b, i) return ( - // is_space: - b[i] == ' ' || + // is_space: + b[i] == ' ' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) @@ -166,8 +166,8 @@ func is_spacez(b []byte, i int) bool { func is_blankz(b []byte, i int) bool { //return is_blank(b, i) || is_breakz(b, i) return ( - // is_blank: - b[i] == ' ' || b[i] == '\t' || + // is_blank: + b[i] == ' ' || b[i] == '\t' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) diff --git a/hack/tools/vendor/golang.org/x/net/http2/frame.go b/hack/tools/vendor/golang.org/x/net/http2/frame.go index 97bd8b06f7..db3264da8c 100644 --- a/hack/tools/vendor/golang.org/x/net/http2/frame.go +++ b/hack/tools/vendor/golang.org/x/net/http2/frame.go @@ -39,7 +39,7 @@ const ( FrameContinuation FrameType = 0x9 ) -var frameName = map[FrameType]string{ +var frameNames = [...]string{ FrameData: "DATA", FrameHeaders: "HEADERS", FramePriority: "PRIORITY", @@ -53,10 +53,10 @@ var frameName = map[FrameType]string{ } func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s + if int(t) < len(frameNames) { + return frameNames[t] } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t) } // Flags is a bitmask of HTTP/2 flags. @@ -124,7 +124,7 @@ var flagName = map[FrameType]map[Flags]string{ // might be 0). type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) -var frameParsers = map[FrameType]frameParser{ +var frameParsers = [...]frameParser{ FrameData: parseDataFrame, FrameHeaders: parseHeadersFrame, FramePriority: parsePriorityFrame, @@ -138,8 +138,8 @@ var frameParsers = map[FrameType]frameParser{ } func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f + if int(t) < len(frameParsers) { + return frameParsers[t] } return parseUnknownFrame } @@ -509,7 +509,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { } if fh.Length > fr.maxReadSize { if fh == invalidHTTP1LookingFrameHeader() { - return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) } return nil, ErrFrameTooLarge } diff --git a/hack/tools/vendor/golang.org/x/net/http2/http2.go b/hack/tools/vendor/golang.org/x/net/http2/http2.go index 6c18ea230b..ea5ae629fd 100644 --- a/hack/tools/vendor/golang.org/x/net/http2/http2.go +++ b/hack/tools/vendor/golang.org/x/net/http2/http2.go @@ -11,8 +11,6 @@ // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. 
package http2 // import "golang.org/x/net/http2" import ( diff --git a/hack/tools/vendor/golang.org/x/net/trace/events.go b/hack/tools/vendor/golang.org/x/net/trace/events.go index c646a6952e..3aaffdd1f7 100644 --- a/hack/tools/vendor/golang.org/x/net/trace/events.go +++ b/hack/tools/vendor/golang.org/x/net/trace/events.go @@ -508,7 +508,7 @@ const eventsHTML = ` {{$el.When}} {{$el.ElapsedTime}} - {{$el.Title}} + {{$el.Title}} {{if $.Expanded}} diff --git a/hack/tools/vendor/golang.org/x/oauth2/internal/doc.go b/hack/tools/vendor/golang.org/x/oauth2/internal/doc.go index 03265e888a..8c7c475f2d 100644 --- a/hack/tools/vendor/golang.org/x/oauth2/internal/doc.go +++ b/hack/tools/vendor/golang.org/x/oauth2/internal/doc.go @@ -2,5 +2,5 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. +// Package internal contains support packages for [golang.org/x/oauth2]. package internal diff --git a/hack/tools/vendor/golang.org/x/oauth2/internal/oauth2.go b/hack/tools/vendor/golang.org/x/oauth2/internal/oauth2.go index 14989beaf4..71ea6ad1f5 100644 --- a/hack/tools/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/hack/tools/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -13,7 +13,7 @@ import ( ) // ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a +// to an [*rsa.PrivateKey]. It detects whether the private key is in a // PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. diff --git a/hack/tools/vendor/golang.org/x/oauth2/internal/token.go b/hack/tools/vendor/golang.org/x/oauth2/internal/token.go index e83ddeef0f..8389f24629 100644 --- a/hack/tools/vendor/golang.org/x/oauth2/internal/token.go +++ b/hack/tools/vendor/golang.org/x/oauth2/internal/token.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "mime" "net/http" @@ -26,9 +25,9 @@ import ( // the requests to access protected resources on the OAuth 2.0 // provider's backend. // -// This type is a mirror of oauth2.Token and exists to break +// This type is a mirror of [golang.org/x/oauth2.Token] and exists to break // an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. +// should convert this Token into an [golang.org/x/oauth2.Token] before use. type Token struct { // AccessToken is the token that authorizes and authenticates // the requests. @@ -50,9 +49,16 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // Raw optionally contains extra metadata from the server // when updating a token. - Raw interface{} + Raw any } // tokenJSON is the struct representing the HTTP response from OAuth2 @@ -99,14 +105,6 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { return nil } -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. 
Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. -func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - // AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. type AuthStyle int @@ -143,6 +141,11 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { return c } +type authStyleCacheKey struct { + url string + clientID string +} + // AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that @@ -150,26 +153,26 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { // small. type AuthStyleCache struct { mu sync.Mutex - m map[string]AuthStyle // keyed by tokenURL + m map[authStyleCacheKey]AuthStyle } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. -func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { +func (c *AuthStyleCache) lookupAuthStyle(tokenURL, clientID string) (style AuthStyle, ok bool) { c.mu.Lock() defer c.mu.Unlock() - style, ok = c.m[tokenURL] + style, ok = c.m[authStyleCacheKey{tokenURL, clientID}] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { +func (c *AuthStyleCache) setAuthStyle(tokenURL, clientID string, v AuthStyle) { c.mu.Lock() defer c.mu.Unlock() if c.m == nil { - c.m = make(map[string]AuthStyle) + c.m = make(map[authStyleCacheKey]AuthStyle) } - c.m[tokenURL] = v + c.m[authStyleCacheKey{tokenURL, clientID}] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -210,9 +213,9 @@ func cloneURLValues(v url.Values) url.Values { } func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { - needsAuthStyleProbe := authStyle == 0 + needsAuthStyleProbe := authStyle == AuthStyleUnknown if needsAuthStyleProbe { - if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL, clientID); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -242,7 +245,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - styleCache.setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, clientID, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. 
@@ -257,7 +260,7 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, err } - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) r.Body.Close() if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) @@ -312,7 +315,8 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { TokenType: tj.TokenType, RefreshToken: tj.RefreshToken, Expiry: tj.expiry(), - Raw: make(map[string]interface{}), + ExpiresIn: int64(tj.ExpiresIn), + Raw: make(map[string]any), } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } diff --git a/hack/tools/vendor/golang.org/x/oauth2/internal/transport.go b/hack/tools/vendor/golang.org/x/oauth2/internal/transport.go index b9db01ddfd..afc0aeb274 100644 --- a/hack/tools/vendor/golang.org/x/oauth2/internal/transport.go +++ b/hack/tools/vendor/golang.org/x/oauth2/internal/transport.go @@ -9,8 +9,8 @@ import ( "net/http" ) -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate an [*http.Client] value with a context. var HTTPClient ContextKey // ContextKey is just an empty struct. It exists so HTTPClient can be diff --git a/hack/tools/vendor/golang.org/x/oauth2/oauth2.go b/hack/tools/vendor/golang.org/x/oauth2/oauth2.go index 09f6a49b80..de34feb844 100644 --- a/hack/tools/vendor/golang.org/x/oauth2/oauth2.go +++ b/hack/tools/vendor/golang.org/x/oauth2/oauth2.go @@ -22,9 +22,9 @@ import ( ) // NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). +// your own [context.Context]. // -// Deprecated: Use context.Background() or context.TODO() instead. +// Deprecated: Use [context.Background] or [context.TODO] instead. var NoContext = context.TODO() // RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. @@ -37,8 +37,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) {} // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. -// For the client credentials 2-legged OAuth2 flow, see the clientcredentials -// package (https://golang.org/x/oauth2/clientcredentials). +// For the client credentials 2-legged OAuth2 flow, see the +// [golang.org/x/oauth2/clientcredentials] package. type Config struct { // ClientID is the application's ID. ClientID string @@ -46,7 +46,7 @@ type Config struct { // ClientSecret is the application's secret. ClientSecret string - // Endpoint contains the resource server's token endpoint + // Endpoint contains the authorization server's token endpoint // URLs. These are constants specific to each server and are // often available via site-specific packages, such as // google.Endpoint or github.Endpoint. @@ -56,7 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. RedirectURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. 
Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is @@ -135,7 +135,7 @@ type setParam struct{ k, v string } func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// SetAuthURLParam builds an [AuthCodeOption] which passes key/value parameters // to a provider's authorization endpoint. func SetAuthURLParam(key, value string) AuthCodeOption { return setParam{key, value} @@ -148,8 +148,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // request and callback. The authorization server includes this value when // redirecting the user agent back to the client. // -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. +// Opts may include [AccessTypeOnline] or [AccessTypeOffline], as well +// as [ApprovalForce]. // // To protect against CSRF attacks, opts should include a PKCE challenge // (S256ChallengeOption). Not all servers support PKCE. An alternative is to @@ -194,7 +194,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { // and when other authorization grant types are not available." // See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { v := url.Values{ "grant_type": {"password"}, @@ -212,10 +212,10 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // It is used after a resource provider redirects the user back // to the Redirect URI (the URL obtained from AuthCodeURL). // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. // -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state") if you are +// The code will be in the [http.Request.FormValue]("code"). Before +// calling Exchange, be sure to validate [http.Request.FormValue]("state") if you are // using it to protect against CSRF attacks. // // If using PKCE to protect against CSRF attacks, opts should include a @@ -242,10 +242,10 @@ func (c *Config) Client(ctx context.Context, t *Token) *http.Client { return NewClient(ctx, c.TokenSource(ctx, t)) } -// TokenSource returns a TokenSource that returns t until t expires, +// TokenSource returns a [TokenSource] that returns t until t expires, // automatically refreshing it as necessary using the provided context. // -// Most users will use Config.Client instead. +// Most users will use [Config.Client] instead. func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { tkr := &tokenRefresher{ ctx: ctx, @@ -260,7 +260,7 @@ func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { } } -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// tokenRefresher is a TokenSource that makes "grant_type=refresh_token" // HTTP requests to renew a token using a RefreshToken. 
type tokenRefresher struct { ctx context.Context // used to get HTTP requests @@ -288,7 +288,7 @@ func (tf *tokenRefresher) Token() (*Token, error) { if tf.refreshToken != tk.RefreshToken { tf.refreshToken = tk.RefreshToken } - return tk, err + return tk, nil } // reuseTokenSource is a TokenSource that holds a single token in memory @@ -305,8 +305,7 @@ type reuseTokenSource struct { } // Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. +// refresh the current token and return the new one. func (s *reuseTokenSource) Token() (*Token, error) { s.mu.Lock() defer s.mu.Unlock() @@ -322,7 +321,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { return t, nil } -// StaticTokenSource returns a TokenSource that always returns the same token. +// StaticTokenSource returns a [TokenSource] that always returns the same token. // Because the provided token t is never refreshed, StaticTokenSource is only // useful for tokens that never expire. func StaticTokenSource(t *Token) TokenSource { @@ -338,16 +337,16 @@ func (s staticTokenSource) Token() (*Token, error) { return s.t, nil } -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate a [*http.Client] value with a context. var HTTPClient internal.ContextKey -// NewClient creates an *http.Client from a Context and TokenSource. +// NewClient creates an [*http.Client] from a [context.Context] and [TokenSource]. // The returned client is not valid beyond the lifetime of the context. // -// Note that if a custom *http.Client is provided via the Context it +// Note that if a custom [*http.Client] is provided via the [context.Context] it // is used only for token acquisition and is not used to configure the -// *http.Client returned from NewClient. +// [*http.Client] returned from NewClient. // // As a special case, if src is nil, a non-OAuth2 client is returned // using the provided context. This exists to support related OAuth2 @@ -356,15 +355,19 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { return internal.ContextClient(ctx) } + cc := internal.ContextClient(ctx) return &http.Client{ Transport: &Transport{ - Base: internal.ContextClient(ctx).Transport, + Base: cc.Transport, Source: ReuseTokenSource(nil, src), }, + CheckRedirect: cc.CheckRedirect, + Jar: cc.Jar, + Timeout: cc.Timeout, } } -// ReuseTokenSource returns a TokenSource which repeatedly returns the +// ReuseTokenSource returns a [TokenSource] which repeatedly returns the // same token as long as it's valid, starting with t. // When its cached token is invalid, a new token is obtained from src. // @@ -372,10 +375,10 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { // (such as a file on disk) between runs of a program, rather than // obtaining new tokens unnecessarily. // -// The initial token t may be nil, in which case the TokenSource is +// The initial token t may be nil, in which case the [TokenSource] is // wrapped in a caching version if it isn't one already. This also // means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. +// [TokenSource] without adverse effects. func ReuseTokenSource(t *Token, src TokenSource) TokenSource { // Don't wrap a reuseTokenSource in itself. 
That would work, // but cause an unnecessary number of mutex operations. @@ -393,8 +396,8 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the -// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// ReuseTokenSourceWithExpiry returns a [TokenSource] that acts in the same manner as the +// [TokenSource] returned by [ReuseTokenSource], except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { diff --git a/hack/tools/vendor/golang.org/x/oauth2/pkce.go b/hack/tools/vendor/golang.org/x/oauth2/pkce.go index 50593b6dfe..cea8374d51 100644 --- a/hack/tools/vendor/golang.org/x/oauth2/pkce.go +++ b/hack/tools/vendor/golang.org/x/oauth2/pkce.go @@ -1,6 +1,7 @@ // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + package oauth2 import ( @@ -20,9 +21,9 @@ const ( // This follows recommendations in RFC 7636. // // A fresh verifier should be generated for each authorization. -// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange -// (or Config.DeviceAccessToken). +// The resulting verifier should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] +// with [S256ChallengeOption], and to [Config.Exchange] or [Config.DeviceAccessToken] +// with [VerifierOption]. func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be // used to create a 32-octet sequence. The octet sequence is then @@ -36,22 +37,22 @@ func GenerateVerifier() string { return base64.RawURLEncoding.EncodeToString(data) } -// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be -// passed to Config.Exchange or Config.DeviceAccessToken only. +// VerifierOption returns a PKCE code verifier [AuthCodeOption]. It should only be +// passed to [Config.Exchange] or [Config.DeviceAccessToken]. func VerifierOption(verifier string) AuthCodeOption { return setParam{k: codeVerifierKey, v: verifier} } // S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. // -// Prefer to use S256ChallengeOption where possible. +// Prefer to use [S256ChallengeOption] where possible. func S256ChallengeFromVerifier(verifier string) string { sha := sha256.Sum256([]byte(verifier)) return base64.RawURLEncoding.EncodeToString(sha[:]) } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] // only. func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/hack/tools/vendor/golang.org/x/oauth2/token.go b/hack/tools/vendor/golang.org/x/oauth2/token.go index 109997d77c..239ec32962 100644 --- a/hack/tools/vendor/golang.org/x/oauth2/token.go +++ b/hack/tools/vendor/golang.org/x/oauth2/token.go @@ -44,7 +44,7 @@ type Token struct { // Expiry is the optional expiration time of the access token. 
// - // If zero, TokenSource implementations will reuse the same + // If zero, [TokenSource] implementations will reuse the same // token forever and RefreshToken or equivalent // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` @@ -58,7 +58,7 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. - raw interface{} + raw any // expiryDelta is used to calculate when a token is considered // expired, by subtracting from Expiry. If zero, defaultExpiryDelta @@ -86,16 +86,16 @@ func (t *Token) Type() string { // SetAuthHeader sets the Authorization header to r using the access // token in t. // -// This method is unnecessary when using Transport or an HTTP Client +// This method is unnecessary when using [Transport] or an HTTP Client // returned by this package. func (t *Token) SetAuthHeader(r *http.Request) { r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) } -// WithExtra returns a new Token that's a clone of t, but using the +// WithExtra returns a new [Token] that's a clone of t, but using the // provided raw extra map. This is only intended for use by packages // implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { +func (t *Token) WithExtra(extra any) *Token { t2 := new(Token) *t2 = *t t2.raw = extra @@ -105,8 +105,8 @@ func (t *Token) WithExtra(extra interface{}) *Token { // Extra returns an extra field. // Extra fields are key-value pairs returned by the server as a // part of the token retrieval response. -func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { +func (t *Token) Extra(key string) any { + if raw, ok := t.raw.(map[string]any); ok { return raw[key] } @@ -163,13 +163,14 @@ func tokenFromInternal(t *internal.Token) *Token { TokenType: t.TokenType, RefreshToken: t.RefreshToken, Expiry: t.Expiry, + ExpiresIn: t.ExpiresIn, raw: t.Raw, } } // retrieveToken takes a *Config and uses that to retrieve an *internal.Token. // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. +// with an error. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { diff --git a/hack/tools/vendor/golang.org/x/oauth2/transport.go b/hack/tools/vendor/golang.org/x/oauth2/transport.go index 90657915fb..8bbebbac9e 100644 --- a/hack/tools/vendor/golang.org/x/oauth2/transport.go +++ b/hack/tools/vendor/golang.org/x/oauth2/transport.go @@ -11,12 +11,12 @@ import ( "sync" ) -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. +// Transport is an [http.RoundTripper] that makes OAuth 2.0 HTTP requests, +// wrapping a base [http.RoundTripper] and adding an Authorization header +// with a token from the supplied [TokenSource]. // // Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. +// higher-level [Config.Client] method instead. type Transport struct { // Source supplies the token to add to outgoing requests' // Authorization headers. 
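For context on the transport.go changes below: the Transport documented above is typically wired up as in this sketch (the AccessToken value is a placeholder; as the doc comment says, most code should use Config.Client instead):

```go
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	// Placeholder token; a real program would obtain a TokenSource
	// from a Config or some other credential flow.
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})

	// Transport wraps the base RoundTripper and injects the
	// Authorization header from the TokenSource on each request; the
	// hunk below switches its request copying to req.Clone.
	client := &http.Client{
		Transport: &oauth2.Transport{
			Base:   http.DefaultTransport,
			Source: src,
		},
	}
	fmt.Printf("client ready: %T\n", client.Transport)
}
```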
@@ -47,7 +47,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { return nil, err } - req2 := cloneRequest(req) // per RoundTripper contract + req2 := req.Clone(req.Context()) token.SetAuthHeader(req2) // req.Body is assumed to be closed by the base RoundTripper. @@ -73,17 +73,3 @@ func (t *Transport) base() http.RoundTripper { } return http.DefaultTransport } - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} diff --git a/hack/tools/vendor/golang.org/x/sync/errgroup/errgroup.go b/hack/tools/vendor/golang.org/x/sync/errgroup/errgroup.go index cb6bb9ad3b..1d8cffae8c 100644 --- a/hack/tools/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/hack/tools/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -12,8 +12,6 @@ package errgroup import ( "context" "fmt" - "runtime" - "runtime/debug" "sync" ) @@ -33,10 +31,6 @@ type Group struct { errOnce sync.Once err error - - mu sync.Mutex - panicValue any // = PanicError | PanicValue; non-nil if some Group.Go goroutine panicked. - abnormal bool // some Group.Go goroutine terminated abnormally (panic or goexit). } func (g *Group) done() { @@ -56,22 +50,13 @@ func WithContext(ctx context.Context) (*Group, context.Context) { return &Group{cancel: cancel}, ctx } -// Wait blocks until all function calls from the Go method have returned -// normally, then returns the first non-nil error (if any) from them. -// -// If any of the calls panics, Wait panics with a [PanicValue]; -// and if any of them calls [runtime.Goexit], Wait calls runtime.Goexit. +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. func (g *Group) Wait() error { g.wg.Wait() if g.cancel != nil { g.cancel(g.err) } - if g.panicValue != nil { - panic(g.panicValue) - } - if g.abnormal { - runtime.Goexit() - } return g.err } @@ -81,53 +66,31 @@ func (g *Group) Wait() error { // It blocks until the new goroutine can be added without the number of // goroutines in the group exceeding the configured limit. // -// The first goroutine in the group that returns a non-nil error, panics, or -// invokes [runtime.Goexit] will cancel the associated Context, if any. +// The first goroutine in the group that returns a non-nil error will +// cancel the associated Context, if any. The error will be returned +// by Wait. 
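The cloneRequest deletion above is possible because net/http's Request.Clone (Go 1.13+) already deep-copies the header map, which is all the hand-rolled helper did. Any custom RoundTripper can honor the same contract the same way; a hedged sketch (the header name is illustrative only):

package example

import "net/http"

// headerTransport mirrors what transport.go now does: clone the request
// before mutating it, since RoundTrip must not modify the caller's request.
type headerTransport struct {
	base http.RoundTripper
}

func (t headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	req2 := req.Clone(req.Context()) // per RoundTripper contract; deep-copies Header
	req2.Header.Set("X-Example", "1")
	return t.base.RoundTrip(req2)
}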
func (g *Group) Go(f func() error) { if g.sem != nil { g.sem <- token{} } - g.add(f) -} - -func (g *Group) add(f func() error) { g.wg.Add(1) go func() { defer g.done() - normalReturn := false - defer func() { - if normalReturn { - return - } - v := recover() - g.mu.Lock() - defer g.mu.Unlock() - if !g.abnormal { - if g.cancel != nil { - g.cancel(g.err) - } - g.abnormal = true - } - if v != nil && g.panicValue == nil { - switch v := v.(type) { - case error: - g.panicValue = PanicError{ - Recovered: v, - Stack: debug.Stack(), - } - default: - g.panicValue = PanicValue{ - Recovered: v, - Stack: debug.Stack(), - } - } - } - }() - err := f() - normalReturn = true - if err != nil { + // It is tempting to propagate panics from f() + // up to the goroutine that calls Wait, but + // it creates more problems than it solves: + // - it delays panics arbitrarily, + // making bugs harder to detect; + // - it turns f's panic stack into a mere value, + // hiding it from crash-monitoring tools; + // - it risks deadlocks that hide the panic entirely, + // if f's panic leaves the program in a state + // that prevents the Wait call from being reached. + // See #53757, #74275, #74304, #74306. + + if err := f(); err != nil { g.errOnce.Do(func() { g.err = err if g.cancel != nil { @@ -152,7 +115,19 @@ func (g *Group) TryGo(f func() error) bool { } } - g.add(f) + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() return true } @@ -174,34 +149,3 @@ func (g *Group) SetLimit(n int) { } g.sem = make(chan token, n) } - -// PanicError wraps an error recovered from an unhandled panic -// when calling a function passed to Go or TryGo. -type PanicError struct { - Recovered error - Stack []byte // result of call to [debug.Stack] -} - -func (p PanicError) Error() string { - if len(p.Stack) > 0 { - return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack) - } - return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered) -} - -func (p PanicError) Unwrap() error { return p.Recovered } - -// PanicValue wraps a value that does not implement the error interface, -// recovered from an unhandled panic when calling a function passed to Go or -// TryGo. 
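With the panic-recovery machinery removed above, errgroup is back to its original contract: the first non-nil error cancels the group's context and is returned by Wait, while a panic inside f is no longer recovered and will crash the process. A usage sketch under that contract (fetch is a hypothetical worker, not from this diff):

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func fetch(ctx context.Context, id int) error {
	if id == 3 {
		return errors.New("boom") // first error cancels ctx for the others
	}
	return ctx.Err() // nil unless the group was already cancelled
}

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // bound concurrency; set before launching goroutines
	for i := 0; i < 10; i++ {
		i := i // capture per iteration (pre-Go 1.22 loop semantics)
		g.Go(func() error { return fetch(ctx, i) })
	}
	if err := g.Wait(); err != nil {
		fmt.Println("first error:", err)
	}
}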
-type PanicValue struct { - Recovered any - Stack []byte // result of call to [debug.Stack] -} - -func (p PanicValue) String() string { - if len(p.Stack) > 0 { - return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack) - } - return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered) -} diff --git a/hack/tools/vendor/golang.org/x/sys/unix/mkerrors.sh b/hack/tools/vendor/golang.org/x/sys/unix/mkerrors.sh index 6ab02b6c31..d1c8b2640e 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/hack/tools/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -349,6 +349,9 @@ struct ltchars { #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) +// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info") +#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME +#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION ' includes_NetBSD=' diff --git a/hack/tools/vendor/golang.org/x/sys/unix/syscall_darwin.go b/hack/tools/vendor/golang.org/x/sys/unix/syscall_darwin.go index 798f61ad3b..7838ca5db2 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } -// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) const minIovec = 8 func Readv(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) @@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) { } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = preadv(fd, iovecs, offset) @@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } } -func darwinMajorMinPatch() (maj, min, patch int, err error) { - var un Utsname - err = Uname(&un) - if err != nil { - return - } - - var mmp [3]int - c := 0 -Loop: - for _, b := range un.Release[:] { - switch { - case b >= '0' && b <= '9': - mmp[c] = 10*mmp[c] + int(b-'0') - case b == '.': - c++ - if c > 2 { - return 0, 0, 0, ENOTSUP - } - case b == 0: - break Loop - default: - return 0, 0, 0, ENOTSUP - } - } - if c != 2 { - return 0, 0, 0, ENOTSUP - } - return mmp[0], mmp[1], mmp[2], nil -} - -func darwinKernelVersionMin(maj, min, patch int) bool { - actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch() - if err != nil { - return false - } - return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch) -} - +//sys 
connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux.go index 4f432bfe8f..b6db27d937 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -319,6 +319,7 @@ const ( AUDIT_INTEGRITY_POLICY_RULE = 0x70f AUDIT_INTEGRITY_RULE = 0x70d AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_INTEGRITY_USERSPACE = 0x710 AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f AUDIT_IPE_ACCESS = 0x58c @@ -327,6 +328,8 @@ const ( AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -491,6 +494,7 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 @@ -527,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -554,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -843,9 +849,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2023-03-01)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x30 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -936,11 +942,10 @@ const ( EPOLL_CTL_MOD = 0x3 EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 + ETHTOOL_FAMILY_NAME = "ethtool" + ETHTOOL_FAMILY_VERSION = 0x1 ETHTOOL_FEC_AUTO = 0x2 ETHTOOL_FEC_BASER = 0x10 ETHTOOL_FEC_LLRS = 0x20 @@ -1203,13 +1208,18 @@ const ( FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 FAN_EPIDFD = -0x2 + FAN_ERRNO_BITS = 0x8 + FAN_ERRNO_MASK = 0xff + FAN_ERRNO_SHIFT = 0x18 FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 + FAN_EVENT_INFO_TYPE_RANGE = 0x6 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 @@ -1224,9 +1234,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1240,6 +1253,7 @@ const ( FAN_OPEN_EXEC = 0x1000 FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 + FAN_PRE_ACCESS = 0x100000 FAN_Q_OVERFLOW = 0x4000 FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 @@ -1247,6 +1261,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1266,6 
+1281,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1574,7 +1590,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 @@ -1625,7 +1640,6 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -1687,7 +1701,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1809,7 +1822,11 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 @@ -2485,6 +2502,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 @@ -2644,6 +2665,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2724,6 +2749,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2787,7 +2813,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e + RTA_MAX = 0x1f RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -2864,10 +2890,12 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELANYCAST = 0x3d RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELLINKPROP = 0x6d RTM_DELMDB = 0x55 + RTM_DELMULTICAST = 0x39 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 RTM_DELNEXTHOP = 0x69 @@ -2917,11 +2945,13 @@ const ( RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWANYCAST = 0x3c RTM_NEWCACHEREPORT = 0x60 RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWLINKPROP = 0x6c RTM_NEWMDB = 0x54 + RTM_NEWMULTICAST = 0x38 RTM_NEWNDUSEROPT = 0x44 RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 @@ -2970,6 +3000,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -2987,11 +3018,12 @@ const ( RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 RWF_ATOMIC = 0x40 + RWF_DONTCACHE = 0x80 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x7f + RWF_SUPPORTED = 0xff RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3271,6 +3303,7 @@ const ( STATX_BTIME = 0x800 STATX_CTIME = 0x80 STATX_DIOALIGN = 0x2000 + STATX_DIO_READ_ALIGN = 0x20000 STATX_GID = 0x10 STATX_INO = 
0x100 STATX_MNT_ID = 0x1000 @@ -3322,7 +3355,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xe + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3392,8 +3425,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3503,6 +3534,7 @@ const ( TP_STATUS_WRONG_FORMAT = 0x4 TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 + UBI_IOCECNFO = 0xc01c6f06 UDF_SUPER_MAGIC = 0x15013346 UDP_CORK = 0x1 UDP_ENCAP = 0x64 @@ -3515,8 +3547,6 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff @@ -3559,7 +3589,7 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3673,6 +3703,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 75207613c7..1c37f9fbc4 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -360,6 +361,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -372,6 +374,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c68acda535..6f54d34aef 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -361,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -373,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a8c607ab86..783ec5c126 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -366,6 +367,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -378,6 +380,7 @@ const ( 
SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 18563dd8d3..ca83d3ba16 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 22912cdaa9..607e611c0c 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -353,6 +354,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -365,6 +367,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 29344eb37a..b9cb5bd3c0 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 20d51fb96a..65b078a638 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go 
b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 321b60902a..5298a3033d 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 9bacdf1e27..7bc557c876 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c224272615..152399bb04 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -414,6 +415,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -426,6 +428,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 6270c8ee13..1a1ce2409c 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -430,6 +432,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 9966c1941f..4231a1fb57 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ 
b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -430,6 +432,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 848e5fcc42..21c0e95266 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -350,6 +351,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -362,6 +364,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 669b2adb80..f00d1cd7cf 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -422,6 +423,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -434,6 +436,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 4834e57514..bc8d539e6a 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -461,6 +462,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS = 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 @@ -473,6 +475,7 @@ const ( SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 SO_RCVMARK = 0x54 + SO_RCVPRIORITY = 0x5b SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff306..aca56ee494 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -462,4 +462,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git 
a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 5eb450695e..2ea1ef58c3 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -385,4 +385,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e5029744..d22c8af319 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -426,4 +426,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec51b..5ee264ae97 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -329,4 +329,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a18..f9f03ebf5f 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -325,4 +325,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a336b..87c2118e84 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b9962278..391ad102fb 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9e6..5656157757 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc22..0482b52e3c 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git 
a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 0d777bfbb1..71806f08f3 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -453,4 +453,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b446365025..e35a710582 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c188..2aea476705 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 8405391698..6c9bb4e560 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -330,4 +330,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790d6..680bc9915a 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -391,4 +391,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f9d..620f271052 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -404,4 +404,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux.go index a46abe6472..cd236443f6 100644 --- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -114,8 +114,10 @@ type Statx_t struct { Atomic_write_unit_min uint32 Atomic_write_unit_max uint32 Atomic_write_segments_max uint32 + Dio_read_offset_align uint32 + Atomic_write_unit_max_opt uint32 _ [1]uint32 - _ [9]uint64 + _ [8]uint64 } type Fsid struct { @@ -199,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 + _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -2226,8 +2229,11 @@ const ( NFT_PAYLOAD_LL_HEADER = 0x0 NFT_PAYLOAD_NETWORK_HEADER = 0x1 NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_INNER_HEADER = 0x3 + 
NFT_PAYLOAD_TUN_HEADER = 0x4 NFT_PAYLOAD_CSUM_NONE = 0x0 NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_CSUM_SCTP = 0x2 NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 NFTA_PAYLOAD_UNSPEC = 0x0 NFTA_PAYLOAD_DREG = 0x1 @@ -2314,6 +2320,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2594,8 +2605,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x20000 - SOF_TIMESTAMPING_MASK = 0x3ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3802,7 +3813,16 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2d + ETHTOOL_MSG_PLCA_GET_CFG = 0x27 + ETHTOOL_MSG_PLCA_SET_CFG = 0x28 + ETHTOOL_MSG_PLCA_GET_STATUS = 0x29 + ETHTOOL_MSG_MM_GET = 0x2a + ETHTOOL_MSG_MM_SET = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 0x2c + ETHTOOL_MSG_PHY_GET = 0x2d + ETHTOOL_MSG_TSCONFIG_GET = 0x2e + ETHTOOL_MSG_TSCONFIG_SET = 0x2f + ETHTOOL_MSG_USER_MAX = 0x2f ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3842,7 +3862,17 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2e + ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 0x27 + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 0x28 + ETHTOOL_MSG_PLCA_NTF = 0x29 + ETHTOOL_MSG_MM_GET_REPLY = 0x2a + ETHTOOL_MSG_MM_NTF = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 0x2c + ETHTOOL_MSG_PHY_GET_REPLY = 0x2d + ETHTOOL_MSG_PHY_NTF = 0x2e + ETHTOOL_MSG_TSCONFIG_GET_REPLY = 0x2f + ETHTOOL_MSG_TSCONFIG_SET_REPLY = 0x30 + ETHTOOL_MSG_KERNEL_MAX = 0x30 ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3949,7 +3979,12 @@ const ( ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb ETHTOOL_A_RINGS_CQE_SIZE = 0xc ETHTOOL_A_RINGS_TX_PUSH = 0xd - ETHTOOL_A_RINGS_MAX = 0x10 + ETHTOOL_A_RINGS_RX_PUSH = 0xe + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 0xf + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 0x10 + ETHTOOL_A_RINGS_HDS_THRESH = 0x11 + ETHTOOL_A_RINGS_HDS_THRESH_MAX = 0x12 + ETHTOOL_A_RINGS_MAX = 0x12 ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -4015,7 +4050,9 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x6 + ETHTOOL_A_TSINFO_STATS = 0x6 + ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 + ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4101,6 +4138,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type EthtoolDrvinfo struct { @@ -4613,6 +4663,7 @@ const ( NL80211_ATTR_AKM_SUITES = 0x4c NL80211_ATTR_AP_ISOLATE = 0x60 NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135 + NL80211_ATTR_ASSOC_SPP_AMSDU = 0x14a NL80211_ATTR_AUTH_DATA = 0x9c NL80211_ATTR_AUTH_TYPE = 0x35 NL80211_ATTR_BANDS = 0xef @@ -4623,6 +4674,7 @@ const ( NL80211_ATTR_BSS_BASIC_RATES = 0x24 NL80211_ATTR_BSS = 0x2f NL80211_ATTR_BSS_CTS_PROT = 0x1c + 
NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA = 0x147 NL80211_ATTR_BSS_HT_OPMODE = 0x6d NL80211_ATTR_BSSID = 0xf5 NL80211_ATTR_BSS_SELECT = 0xe3 @@ -4682,6 +4734,7 @@ const ( NL80211_ATTR_DTIM_PERIOD = 0xd NL80211_ATTR_DURATION = 0x57 NL80211_ATTR_EHT_CAPABILITY = 0x136 + NL80211_ATTR_EMA_RNR_ELEMS = 0x145 NL80211_ATTR_EML_CAPABILITY = 0x13d NL80211_ATTR_EXT_CAPA = 0xa9 NL80211_ATTR_EXT_CAPA_MASK = 0xaa @@ -4717,6 +4770,7 @@ const ( NL80211_ATTR_HIDDEN_SSID = 0x7e NL80211_ATTR_HT_CAPABILITY = 0x1f NL80211_ATTR_HT_CAPABILITY_MASK = 0x94 + NL80211_ATTR_HW_TIMESTAMP_ENABLED = 0x144 NL80211_ATTR_IE_ASSOC_RESP = 0x80 NL80211_ATTR_IE = 0x2a NL80211_ATTR_IE_PROBE_RESP = 0x7f @@ -4747,9 +4801,10 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14d + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce + NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 NL80211_ATTR_MAX_MATCH_SETS = 0x85 NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 @@ -4774,9 +4829,12 @@ const ( NL80211_ATTR_MGMT_SUBTYPE = 0x29 NL80211_ATTR_MLD_ADDR = 0x13a NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e + NL80211_ATTR_MLO_LINK_DISABLED = 0x146 NL80211_ATTR_MLO_LINK_ID = 0x139 NL80211_ATTR_MLO_LINKS = 0x138 NL80211_ATTR_MLO_SUPPORT = 0x13b + NL80211_ATTR_MLO_TTLM_DLINK = 0x148 + NL80211_ATTR_MLO_TTLM_ULINK = 0x149 NL80211_ATTR_MNTR_FLAGS = 0x17 NL80211_ATTR_MPATH_INFO = 0x1b NL80211_ATTR_MPATH_NEXT_HOP = 0x1a @@ -4809,12 +4867,14 @@ const ( NL80211_ATTR_PORT_AUTHORIZED = 0x103 NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5 NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6 + NL80211_ATTR_POWER_RULE_PSD = 0x8 NL80211_ATTR_PREV_BSSID = 0x4f NL80211_ATTR_PRIVACY = 0x46 NL80211_ATTR_PROBE_RESP = 0x91 NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90 NL80211_ATTR_PROTOCOL_FEATURES = 0xad NL80211_ATTR_PS_STATE = 0x5d + NL80211_ATTR_PUNCT_BITMAP = 0x142 NL80211_ATTR_QOS_MAP = 0xc7 NL80211_ATTR_RADAR_BACKGROUND = 0x134 NL80211_ATTR_RADAR_EVENT = 0xa8 @@ -4943,7 +5003,9 @@ const ( NL80211_ATTR_WIPHY_FREQ = 0x26 NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9 NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122 + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS = 0x14c NL80211_ATTR_WIPHY_NAME = 0x2 + NL80211_ATTR_WIPHY_RADIOS = 0x14b NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40 @@ -4978,6 +5040,8 @@ const ( NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 NL80211_BAND_ATTR_MAX = 0xd NL80211_BAND_ATTR_RATES = 0x2 + NL80211_BAND_ATTR_S1G_CAPA = 0xd + NL80211_BAND_ATTR_S1G_MCS_NSS_SET = 0xc NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8 @@ -5001,6 +5065,10 @@ const ( NL80211_BSS_BEACON_INTERVAL = 0x4 NL80211_BSS_BEACON_TSF = 0xd NL80211_BSS_BSSID = 0x1 + NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 0x2 + NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 0x1 + NL80211_BSS_CANNOT_USE_REASONS = 0x18 + NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 0x2 NL80211_BSS_CAPABILITY = 0x5 NL80211_BSS_CHAIN_SIGNAL = 0x13 NL80211_BSS_CHAN_WIDTH_10 = 0x1 @@ -5032,6 +5100,9 @@ const ( NL80211_BSS_STATUS = 0x9 NL80211_BSS_STATUS_IBSS_JOINED = 0x2 NL80211_BSS_TSF = 0x3 + NL80211_BSS_USE_FOR = 0x17 + NL80211_BSS_USE_FOR_MLD_LINK = 0x2 + NL80211_BSS_USE_FOR_NORMAL = 0x1 NL80211_CHAN_HT20 = 0x1 NL80211_CHAN_HT40MINUS = 0x2 NL80211_CHAN_HT40PLUS = 0x3 @@ -5117,7 +5188,8 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - 
NL80211_CMD_MAX = 0x9b + NL80211_CMD_LINKS_REMOVED = 0x9a + NL80211_CMD_MAX = 0x9d NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5161,6 +5233,7 @@ const ( NL80211_CMD_SET_COALESCE = 0x65 NL80211_CMD_SET_CQM = 0x3f NL80211_CMD_SET_FILS_AAD = 0x92 + NL80211_CMD_SET_HW_TIMESTAMP = 0x99 NL80211_CMD_SET_INTERFACE = 0x6 NL80211_CMD_SET_KEY = 0xa NL80211_CMD_SET_MAC_ACL = 0x5d @@ -5180,6 +5253,7 @@ const ( NL80211_CMD_SET_SAR_SPECS = 0x8c NL80211_CMD_SET_STATION = 0x12 NL80211_CMD_SET_TID_CONFIG = 0x89 + NL80211_CMD_SET_TID_TO_LINK_MAPPING = 0x9b NL80211_CMD_SET_TX_BITRATE_MASK = 0x39 NL80211_CMD_SET_WDS_PEER = 0x42 NL80211_CMD_SET_WIPHY = 0x2 @@ -5247,6 +5321,7 @@ const ( NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21 NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22 NL80211_EXT_FEATURE_AQL = 0x28 + NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA = 0x40 NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29 NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36 @@ -5262,6 +5337,7 @@ const ( NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c + NL80211_EXT_FEATURE_DFS_CONCURRENT = 0x43 NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 @@ -5281,9 +5357,12 @@ const ( NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_OWE_OFFLOAD_AP = 0x42 + NL80211_EXT_FEATURE_OWE_OFFLOAD = 0x41 NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_PUNCT = 0x3e NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c NL80211_EXT_FEATURE_RRM = 0x1 NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 @@ -5295,8 +5374,10 @@ const ( NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23 NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc NL80211_EXT_FEATURE_SECURE_LTF = 0x37 + NL80211_EXT_FEATURE_SECURE_NAN = 0x3f NL80211_EXT_FEATURE_SECURE_RTT = 0x38 NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5 + NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT = 0x44 NL80211_EXT_FEATURE_STA_TX_PWR = 0x25 NL80211_EXT_FEATURE_TXQS = 0x1c NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35 @@ -5343,7 +5424,10 @@ const ( NL80211_FREQUENCY_ATTR_2MHZ = 0x16 NL80211_FREQUENCY_ATTR_4MHZ = 0x17 NL80211_FREQUENCY_ATTR_8MHZ = 0x18 + NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP = 0x21 + NL80211_FREQUENCY_ATTR_CAN_MONITOR = 0x20 NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd + NL80211_FREQUENCY_ATTR_DFS_CONCURRENT = 0x1d NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7 NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8 NL80211_FREQUENCY_ATTR_DISABLED = 0x2 @@ -5351,12 +5435,14 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x21 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a + NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b NL80211_FREQUENCY_ATTR_NO_HE = 0x13 @@ -5364,8 +5450,11 @@ const ( 
NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3 NL80211_FREQUENCY_ATTR_NO_IR = 0x3 + NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_OFFSET = 0x14 NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3 + NL80211_FREQUENCY_ATTR_PSD = 0x1c NL80211_FREQUENCY_ATTR_RADAR = 0x5 NL80211_FREQUENCY_ATTR_WMM = 0x12 NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3 @@ -5430,6 +5519,7 @@ const ( NL80211_IFTYPE_STATION = 0x2 NL80211_IFTYPE_UNSPECIFIED = 0x0 NL80211_IFTYPE_WDS = 0x5 + NL80211_KCK_EXT_LEN_32 = 0x20 NL80211_KCK_EXT_LEN = 0x18 NL80211_KCK_LEN = 0x10 NL80211_KEK_EXT_LEN = 0x20 @@ -5458,9 +5548,10 @@ const ( NL80211_MAX_SUPP_HT_RATES = 0x4d NL80211_MAX_SUPP_RATES = 0x20 NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 @@ -5703,11 +5794,16 @@ const ( NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_16_MHZ_WIDTH = 0x1d + NL80211_RATE_INFO_1_MHZ_WIDTH = 0x19 + NL80211_RATE_INFO_2_MHZ_WIDTH = 0x1a NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12 NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 + NL80211_RATE_INFO_4_MHZ_WIDTH = 0x1b NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 + NL80211_RATE_INFO_8_MHZ_WIDTH = 0x1c NL80211_RATE_INFO_BITRATE32 = 0x5 NL80211_RATE_INFO_BITRATE = 0x1 NL80211_RATE_INFO_EHT_GI_0_8 = 0x0 @@ -5753,6 +5849,8 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 + NL80211_RATE_INFO_S1G_MCS = 0x17 + NL80211_RATE_INFO_S1G_NSS = 0x18 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 NL80211_RATE_INFO_VHT_NSS = 0x7 @@ -5770,14 +5868,19 @@ const ( NL80211_REKEY_DATA_KEK = 0x1 NL80211_REKEY_DATA_REPLAY_CTR = 0x3 NL80211_REPLAY_CTR_LEN = 0x8 + NL80211_RRF_ALLOW_6GHZ_VLP_AP = 0x1000000 NL80211_RRF_AUTO_BW = 0x800 NL80211_RRF_DFS = 0x10 + NL80211_RRF_DFS_CONCURRENT = 0x200000 NL80211_RRF_GO_CONCURRENT = 0x1000 NL80211_RRF_IR_CONCURRENT = 0x1000 NL80211_RRF_NO_160MHZ = 0x10000 NL80211_RRF_NO_320MHZ = 0x40000 + NL80211_RRF_NO_6GHZ_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_6GHZ_VLP_CLIENT = 0x400000 NL80211_RRF_NO_80MHZ = 0x8000 NL80211_RRF_NO_CCK = 0x2 + NL80211_RRF_NO_EHT = 0x80000 NL80211_RRF_NO_HE = 0x20000 NL80211_RRF_NO_HT40 = 0x6000 NL80211_RRF_NO_HT40MINUS = 0x2000 @@ -5788,7 +5891,10 @@ const ( NL80211_RRF_NO_IR = 0x80 NL80211_RRF_NO_OFDM = 0x1 NL80211_RRF_NO_OUTDOOR = 0x8 + NL80211_RRF_NO_UHB_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_UHB_VLP_CLIENT = 0x400000 NL80211_RRF_PASSIVE_SCAN = 0x80 + NL80211_RRF_PSD = 0x100000 NL80211_RRF_PTMP_ONLY = 0x40 NL80211_RRF_PTP_ONLY = 0x20 NL80211_RXMGMT_FLAG_ANSWERED = 0x1 @@ -5849,6 +5955,7 @@ const ( NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 + NL80211_STA_FLAG_SPP_AMSDU = 0x8 NL80211_STA_FLAG_TDLS_PEER = 0x6 NL80211_STA_FLAG_WME = 0x3 NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23 @@ -6007,6 +6114,13 @@ const ( NL80211_VHT_CAPABILITY_LEN = 0xc NL80211_VHT_NSS_MAX = 0x8 NL80211_WIPHY_NAME_MAXLEN = 0x40 + NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE = 0x2 + NL80211_WIPHY_RADIO_ATTR_INDEX = 0x1 + 
 	NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION = 0x3
+	NL80211_WIPHY_RADIO_ATTR_MAX = 0x4
+	NL80211_WIPHY_RADIO_FREQ_ATTR_END = 0x2
+	NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = 0x2
+	NL80211_WIPHY_RADIO_FREQ_ATTR_START = 0x1
 	NL80211_WMMR_AIFSN = 0x3
 	NL80211_WMMR_CW_MAX = 0x2
 	NL80211_WMMR_CW_MIN = 0x1
@@ -6038,6 +6152,7 @@ const (
 	NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4
 	NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9
 	NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe
+	NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC = 0x14
 	NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa
 	NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb
 	NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index fd402da43f..485f2d3a1b 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -282,7 +282,7 @@ type Taskstats struct {
 	Ac_exitcode uint32
 	Ac_flag uint8
 	Ac_nice uint8
-	_ [4]byte
+	_ [6]byte
 	Cpu_count uint64
 	Cpu_delay_total uint64
 	Blkio_count uint64
@@ -338,6 +338,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint32
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index eb7a5e1864..ecbd1ad8bc 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -351,6 +351,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint64
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index d78ac108b6..02f0463a44 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -91,7 +91,7 @@ type Stat_t struct {
 	Gid uint32
 	Rdev uint64
 	_ uint16
-	_ [4]byte
+	_ [6]byte
 	Size int64
 	Blksize int32
 	_ [4]byte
@@ -273,7 +273,7 @@ type Taskstats struct {
 	Ac_exitcode uint32
 	Ac_flag uint8
 	Ac_nice uint8
-	_ [4]byte
+	_ [6]byte
 	Cpu_count uint64
 	Cpu_delay_total uint64
 	Blkio_count uint64
@@ -329,6 +329,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint32
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index cd06d47f1f..6f4d400d24 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -330,6 +330,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint64
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
index 2f28fe26c1..cd532cfa55 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -331,6 +331,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint64
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 71d6cac2f1..4133620851 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -278,7 +278,7 @@ type Taskstats struct {
 	Ac_exitcode uint32
 	Ac_flag uint8
 	Ac_nice uint8
-	_ [4]byte
+	_ [6]byte
 	Cpu_count uint64
 	Cpu_delay_total uint64
 	Blkio_count uint64
@@ -334,6 +334,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint32
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 8596d45356..eaa37eb718 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -333,6 +333,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint64
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index cd60ea1866..98ae6a1e4a 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -333,6 +333,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint64
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index b0ae420c48..cae1961594 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -278,7 +278,7 @@ type Taskstats struct {
 	Ac_exitcode uint32
 	Ac_flag uint8
 	Ac_nice uint8
-	_ [4]byte
+	_ [6]byte
 	Cpu_count uint64
 	Cpu_delay_total uint64
 	Blkio_count uint64
@@ -334,6 +334,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint32
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
index 8359728759..6ce3b4e028 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
@@ -90,7 +90,7 @@ type Stat_t struct {
 	Gid uint32
 	Rdev uint64
 	_ uint16
-	_ [4]byte
+	_ [6]byte
 	Size int64
 	Blksize int32
 	_ [4]byte
@@ -285,7 +285,7 @@ type Taskstats struct {
 	Ac_exitcode uint32
 	Ac_flag uint8
 	Ac_nice uint8
-	_ [4]byte
+	_ [6]byte
 	Cpu_count uint64
 	Cpu_delay_total uint64
 	Blkio_count uint64
@@ -341,6 +341,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint32
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 69eb6a5c68..c7429c6a14 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -340,6 +340,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint64
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 5f583cb62b..4bf4baf4ca 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -340,6 +340,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint64
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index ad05b51a60..e9709d70af 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -358,6 +358,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint64
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index cf3ce90037..fb44268ca7 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -353,6 +353,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint64
diff --git a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 590b56739c..9c38265c74 100644
--- a/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/hack/tools/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -335,6 +335,22 @@ type Taskstats struct {
 	Wpcopy_delay_total uint64
 	Irq_count uint64
 	Irq_delay_total uint64
+	Cpu_delay_max uint64
+	Cpu_delay_min uint64
+	Blkio_delay_max uint64
+	Blkio_delay_min uint64
+	Swapin_delay_max uint64
+	Swapin_delay_min uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max uint64
+	Compact_delay_min uint64
+	Wpcopy_delay_max uint64
+	Wpcopy_delay_min uint64
+	Irq_delay_max uint64
+	Irq_delay_min uint64
 }
 
 type cpuMask uint64
diff --git a/hack/tools/vendor/golang.org/x/term/term_windows.go b/hack/tools/vendor/golang.org/x/term/term_windows.go
index df6bf948e1..0ddd81c02a 100644
--- a/hack/tools/vendor/golang.org/x/term/term_windows.go
+++ b/hack/tools/vendor/golang.org/x/term/term_windows.go
@@ -20,12 +20,14 @@ func isTerminal(fd int) bool {
 	return err == nil
 }
 
+// This is intended to be used on a console input handle.
+// See https://learn.microsoft.com/en-us/windows/console/setconsolemode
 func makeRaw(fd int) (*State, error) {
 	var st uint32
 	if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
 		return nil, err
 	}
-	raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+	raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT)
 	raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
 	if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil {
 		return nil, err
diff --git a/hack/tools/vendor/golang.org/x/term/terminal.go b/hack/tools/vendor/golang.org/x/term/terminal.go
index 13e9a64ad1..bddb2e2aeb 100644
--- a/hack/tools/vendor/golang.org/x/term/terminal.go
+++ b/hack/tools/vendor/golang.org/x/term/terminal.go
@@ -146,6 +146,7 @@ const (
 	keyCtrlD = 4
 	keyCtrlU = 21
 	keyEnter = '\r'
+	keyLF = '\n'
 	keyEscape = 27
 	keyBackspace = 127
 	keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota
@@ -497,7 +498,7 @@ func (t *Terminal) historyAdd(entry string) {
 // handleKey processes the given key and, optionally, returns a line of text
 // that the user has entered.
 func (t *Terminal) handleKey(key rune) (line string, ok bool) {
-	if t.pasteActive && key != keyEnter {
+	if t.pasteActive && key != keyEnter && key != keyLF {
 		t.addKeyToLine(key)
 		return
 	}
@@ -567,7 +568,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
 			t.setLine(runes, len(runes))
 		}
 	}
-	case keyEnter:
+	case keyEnter, keyLF:
 		t.moveCursorToPos(len(t.line))
 		t.queue([]rune("\r\n"))
 		line = string(t.line)
@@ -812,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) {
 			if !t.pasteActive {
 				lineIsPasted = false
 			}
+			// If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line.
+			if key == keyEnter && len(rest) > 0 && rest[0] == keyLF {
+				rest = rest[1:]
+			}
 			line, lineOk = t.handleKey(key)
 		}
 		if len(rest) > 0 {
diff --git a/hack/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/hack/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
index 6e34df4613..0fb4e7eea8 100644
--- a/hack/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
+++ b/hack/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
@@ -113,7 +113,7 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod
 		// childrenOf elides the FuncType node beneath FuncDecl.
 		// Add it back here for TypeParams, Params, Results,
 		// all FieldLists). But we don't add it back for the "func" token
-		// even though it is is the tree at FuncDecl.Type.Func.
+		// even though it is the tree at FuncDecl.Type.Func.
 		if decl, ok := node.(*ast.FuncDecl); ok {
 			if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv {
 				path = append(path, decl.Type)
@@ -207,6 +207,9 @@ func childrenOf(n ast.Node) []ast.Node {
 		return false // no recursion
 	})
 
+	// TODO(adonovan): be more careful about missing (!Pos.Valid)
+	// tokens in trees produced from invalid input.
+	// Then add fake Nodes for bare tokens.
 	switch n := n.(type) {
 	case *ast.ArrayType:
@@ -226,9 +229,12 @@ func childrenOf(n ast.Node) []ast.Node {
 		children = append(children, tok(n.OpPos, len(n.Op.String())))
 
 	case *ast.BlockStmt:
-		children = append(children,
-			tok(n.Lbrace, len("{")),
-			tok(n.Rbrace, len("}")))
+		if n.Lbrace.IsValid() {
+			children = append(children, tok(n.Lbrace, len("{")))
+		}
+		if n.Rbrace.IsValid() {
+			children = append(children, tok(n.Rbrace, len("}")))
+		}
 
 	case *ast.BranchStmt:
 		children = append(children,
@@ -304,9 +310,12 @@ func childrenOf(n ast.Node) []ast.Node {
 	// TODO(adonovan): Field.{Doc,Comment,Tag}?
 
 	case *ast.FieldList:
-		children = append(children,
-			tok(n.Opening, len("(")), // or len("[")
-			tok(n.Closing, len(")"))) // or len("]")
+		if n.Opening.IsValid() {
+			children = append(children, tok(n.Opening, len("(")))
+		}
+		if n.Closing.IsValid() {
+			children = append(children, tok(n.Closing, len(")")))
+		}
 
 	case *ast.File:
 		// TODO test: Doc
diff --git a/hack/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/hack/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
index 5c8dbbb7a3..4ad0549304 100644
--- a/hack/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
+++ b/hack/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
@@ -67,6 +67,10 @@ var abort = new(int) // singleton, to signal termination of Apply
 //
 // The methods Replace, Delete, InsertBefore, and InsertAfter
 // can be used to change the AST without disrupting Apply.
+//
+// This type is not to be confused with [inspector.Cursor] from
+// package [golang.org/x/tools/go/ast/inspector], which provides
+// stateless navigation of immutable syntax trees.
 type Cursor struct {
 	parent ast.Node
 	name   string
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/astutil/edge/edge.go b/hack/tools/vendor/golang.org/x/tools/go/ast/edge/edge.go
similarity index 100%
rename from hack/tools/vendor/golang.org/x/tools/internal/astutil/edge/edge.go
rename to hack/tools/vendor/golang.org/x/tools/go/ast/edge/edge.go
diff --git a/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
new file mode 100644
index 0000000000..31c8d2f240
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
@@ -0,0 +1,502 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inspector
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"iter"
+	"reflect"
+
+	"golang.org/x/tools/go/ast/edge"
+)
+
+// A Cursor represents an [ast.Node]. It is immutable.
+//
+// Two Cursors compare equal if they represent the same node.
+//
+// Call [Inspector.Root] to obtain a valid cursor for the virtual root
+// node of the traversal.
+//
+// Use the following methods to navigate efficiently around the tree:
+//   - for ancestors, use [Cursor.Parent] and [Cursor.Enclosing];
+//   - for children, use [Cursor.Child], [Cursor.Children],
+//     [Cursor.FirstChild], and [Cursor.LastChild];
+//   - for siblings, use [Cursor.PrevSibling] and [Cursor.NextSibling];
+//   - for descendants, use [Cursor.FindByPos], [Cursor.FindNode],
+//     [Cursor.Inspect], and [Cursor.Preorder].
+//
+// Use the [Cursor.ChildAt] and [Cursor.ParentEdge] methods for
+// information about the edges in a tree: which field (and slice
+// element) of the parent node holds the child.
+type Cursor struct {
+	in    *Inspector
+	index int32 // index of push node; -1 for virtual root node
+}
+
+// Root returns a cursor for the virtual root node,
+// whose children are the files provided to [New].
+//
+// Its [Cursor.Node] and [Cursor.Stack] methods return nil.
+func (in *Inspector) Root() Cursor {
+	return Cursor{in, -1}
+}
+
+// At returns the cursor at the specified index in the traversal,
+// which must have been obtained from [Cursor.Index] on a Cursor
+// belonging to the same Inspector (see [Cursor.Inspector]).
+func (in *Inspector) At(index int32) Cursor {
+	if index < 0 {
+		panic("negative index")
+	}
+	if int(index) >= len(in.events) {
+		panic("index out of range for this inspector")
+	}
+	if in.events[index].index < index {
+		panic("invalid index") // (a push, not a pop)
+	}
+	return Cursor{in, index}
+}
+
+// Inspector returns the cursor's Inspector.
+func (c Cursor) Inspector() *Inspector { return c.in }
+
+// Index returns the index of this cursor position within the package.
+//
+// Clients should not assume anything about the numeric Index value
+// except that it increases monotonically throughout the traversal.
+// It is provided for use with [At].
+//
+// Index must not be called on the Root node.
+func (c Cursor) Index() int32 {
+	if c.index < 0 {
+		panic("Index called on Root node")
+	}
+	return c.index
+}
+
+// Node returns the node at the current cursor position,
+// or nil for the cursor returned by [Inspector.Root].
+func (c Cursor) Node() ast.Node {
+	if c.index < 0 {
+		return nil
+	}
+	return c.in.events[c.index].node
+}
+
+// String returns information about the cursor's node, if any.
+func (c Cursor) String() string {
+	if c.in == nil {
+		return "(invalid)"
+	}
+	if c.index < 0 {
+		return "(root)"
+	}
+	return reflect.TypeOf(c.Node()).String()
+}
+
+// indices return the [start, end) half-open interval of event indices.
+func (c Cursor) indices() (int32, int32) {
+	if c.index < 0 {
+		return 0, int32(len(c.in.events)) // root: all events
+	} else {
+		return c.index, c.in.events[c.index].index + 1 // just one subtree
+	}
+}
+
+// Preorder returns an iterator over the nodes of the subtree
+// represented by c in depth-first order. Each node in the sequence is
+// represented by a Cursor that allows access to the Node, but may
+// also be used to start a new traversal, or to obtain the stack of
+// nodes enclosing the cursor.
+//
+// The traversal sequence is determined by [ast.Inspect]. The types
+// argument, if non-empty, enables type-based filtering of events. The
+// function f is called only for nodes whose type matches an
+// element of the types slice.
+//
+// If you need control over descent into subtrees,
+// or need both pre- and post-order notifications, use [Cursor.Inspect].
+func (c Cursor) Preorder(types ...ast.Node) iter.Seq[Cursor] {
+	mask := maskOf(types)
+
+	return func(yield func(Cursor) bool) {
+		events := c.in.events
+
+		for i, limit := c.indices(); i < limit; {
+			ev := events[i]
+			if ev.index > i { // push?
+				if ev.typ&mask != 0 && !yield(Cursor{c.in, i}) {
+					break
+				}
+				pop := ev.index
+				if events[pop].typ&mask == 0 {
+					// Subtree does not contain types: skip.
+					i = pop + 1
+					continue
+				}
+			}
+			i++
+		}
+	}
+}
+
+// Inspect visits the nodes of the subtree represented by c in
+// depth-first order. It calls f(n) for each node n before it
+// visits n's children. If f returns true, Inspect invokes f
+// recursively for each of the non-nil children of the node.
+//
+// Each node is represented by a Cursor that allows access to the
+// Node, but may also be used to start a new traversal, or to obtain
+// the stack of nodes enclosing the cursor.
+//
+// The complete traversal sequence is determined by [ast.Inspect].
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+func (c Cursor) Inspect(types []ast.Node, f func(c Cursor) (descend bool)) {
+	mask := maskOf(types)
+	events := c.in.events
+	for i, limit := c.indices(); i < limit; {
+		ev := events[i]
+		if ev.index > i {
+			// push
+			pop := ev.index
+			if ev.typ&mask != 0 && !f(Cursor{c.in, i}) ||
+				events[pop].typ&mask == 0 {
+				// The user opted not to descend, or the
+				// subtree does not contain types:
+				// skip past the pop.
+				i = pop + 1
+				continue
+			}
+		}
+		i++
+	}
+}
+
+// Enclosing returns an iterator over the nodes enclosing the
+// current node, starting with the Cursor itself.
+//
+// Enclosing must not be called on the Root node (whose [Cursor.Node] returns nil).
+//
+// The types argument, if non-empty, enables type-based filtering of
+// events: the sequence includes only enclosing nodes whose type
+// matches an element of the types slice.
+func (c Cursor) Enclosing(types ...ast.Node) iter.Seq[Cursor] {
+	if c.index < 0 {
+		panic("Cursor.Enclosing called on Root node")
+	}
+
+	mask := maskOf(types)
+
+	return func(yield func(Cursor) bool) {
+		events := c.in.events
+		for i := c.index; i >= 0; i = events[i].parent {
+			if events[i].typ&mask != 0 && !yield(Cursor{c.in, i}) {
+				break
+			}
+		}
+	}
+}
+
+// Parent returns the parent of the current node.
+//
+// Parent must not be called on the Root node (whose [Cursor.Node] returns nil).
+func (c Cursor) Parent() Cursor {
+	if c.index < 0 {
+		panic("Cursor.Parent called on Root node")
+	}
+
+	return Cursor{c.in, c.in.events[c.index].parent}
+}
+
+// ParentEdge returns the identity of the field in the parent node
+// that holds this cursor's node, and if it is a list, the index within it.
+//
+// For example, f(x, y) is a CallExpr whose three children are Idents.
+// f has edge kind [edge.CallExpr_Fun] and index -1.
+// x and y have kind [edge.CallExpr_Args] and indices 0 and 1, respectively.
+//
+// If called on a child of the Root node, it returns ([edge.Invalid], -1).
+//
+// ParentEdge must not be called on the Root node (whose [Cursor.Node] returns nil).
+func (c Cursor) ParentEdge() (edge.Kind, int) {
+	if c.index < 0 {
+		panic("Cursor.ParentEdge called on Root node")
+	}
+	events := c.in.events
+	pop := events[c.index].index
+	return unpackEdgeKindAndIndex(events[pop].parent)
+}
+
+// ChildAt returns the cursor for the child of the
+// current node identified by its edge and index.
+// The index must be -1 if the edge.Kind is not a slice.
+// The indicated child node must exist.
+//
+// ChildAt must not be called on the Root node (whose [Cursor.Node] returns nil).
+//
+// Invariant: c.Parent().ChildAt(c.ParentEdge()) == c.
+func (c Cursor) ChildAt(k edge.Kind, idx int) Cursor {
+	target := packEdgeKindAndIndex(k, idx)
+
+	// Unfortunately there's no shortcut to looping.
+	events := c.in.events
+	i := c.index + 1
+	for {
+		pop := events[i].index
+		if pop < i {
+			break
+		}
+		if events[pop].parent == target {
+			return Cursor{c.in, i}
+		}
+		i = pop + 1
+	}
+	panic(fmt.Sprintf("ChildAt(%v, %d): no such child of %v", k, idx, c))
+}
+
+// Child returns the cursor for n, which must be a direct child of c's Node.
+//
+// Child must not be called on the Root node (whose [Cursor.Node] returns nil).
+func (c Cursor) Child(n ast.Node) Cursor {
+	if c.index < 0 {
+		panic("Cursor.Child called on Root node")
+	}
+
+	if false {
+		// reference implementation
+		for child := range c.Children() {
+			if child.Node() == n {
+				return child
+			}
+		}
+
+	} else {
+		// optimized implementation
+		events := c.in.events
+		for i := c.index + 1; events[i].index > i; i = events[i].index + 1 {
+			if events[i].node == n {
+				return Cursor{c.in, i}
+			}
+		}
+	}
+	panic(fmt.Sprintf("Child(%T): not a child of %v", n, c))
+}
+
+// NextSibling returns the cursor for the next sibling node in the same list
+// (for example, of files, decls, specs, statements, fields, or expressions) as
+// the current node. It returns (zero, false) if the node is the last node in
+// the list, or is not part of a list.
+//
+// NextSibling must not be called on the Root node.
+//
+// See note at [Cursor.Children].
+func (c Cursor) NextSibling() (Cursor, bool) {
+	if c.index < 0 {
+		panic("Cursor.NextSibling called on Root node")
+	}
+
+	events := c.in.events
+	i := events[c.index].index + 1 // after corresponding pop
+	if i < int32(len(events)) {
+		if events[i].index > i { // push?
+			return Cursor{c.in, i}, true
+		}
+	}
+	return Cursor{}, false
+}
+
+// PrevSibling returns the cursor for the previous sibling node in the
+// same list (for example, of files, decls, specs, statements, fields,
+// or expressions) as the current node. It returns zero if the node is
+// the first node in the list, or is not part of a list.
+//
+// It must not be called on the Root node.
+//
+// See note at [Cursor.Children].
+func (c Cursor) PrevSibling() (Cursor, bool) {
+	if c.index < 0 {
+		panic("Cursor.PrevSibling called on Root node")
+	}
+
+	events := c.in.events
+	i := c.index - 1
+	if i >= 0 {
+		if j := events[i].index; j < i { // pop?
+			return Cursor{c.in, j}, true
+		}
+	}
+	return Cursor{}, false
+}
+
+// FirstChild returns the first direct child of the current node,
+// or zero if it has no children.
+func (c Cursor) FirstChild() (Cursor, bool) {
+	events := c.in.events
+	i := c.index + 1 // i=0 if c is root
+	if i < int32(len(events)) && events[i].index > i { // push?
+		return Cursor{c.in, i}, true
+	}
+	return Cursor{}, false
+}
+
+// LastChild returns the last direct child of the current node,
+// or zero if it has no children.
+func (c Cursor) LastChild() (Cursor, bool) {
+	events := c.in.events
+	if c.index < 0 { // root?
+		if len(events) > 0 {
+			// return push of final event (a pop)
+			return Cursor{c.in, events[len(events)-1].index}, true
+		}
+	} else {
+		j := events[c.index].index - 1 // before corresponding pop
+		// Inv: j == c.index if c has no children
+		// or j is last child's pop.
+		if j > c.index { // c has children
+			return Cursor{c.in, events[j].index}, true
+		}
+	}
+	return Cursor{}, false
+}
+
+// Children returns an iterator over the direct children of the
+// current node, if any.
+//
+// When using Children, NextChild, and PrevChild, bear in mind that a
+// Node's children may come from different fields, some of which may
+// be lists of nodes without a distinguished intervening container
+// such as [ast.BlockStmt].
+//
+// For example, [ast.CaseClause] has a field List of expressions and a
+// field Body of statements, so the children of a CaseClause are a mix
+// of expressions and statements. Other nodes that have "uncontained"
+// list fields include:
+//
+//   - [ast.ValueSpec] (Names, Values)
+//   - [ast.CompositeLit] (Type, Elts)
+//   - [ast.IndexListExpr] (X, Indices)
+//   - [ast.CallExpr] (Fun, Args)
+//   - [ast.AssignStmt] (Lhs, Rhs)
+//
+// So, do not assume that the previous sibling of an ast.Stmt is also
+// an ast.Stmt, or if it is, that they are executed sequentially,
+// unless you have established that, say, its parent is a BlockStmt
+// or its [Cursor.ParentEdge] is [edge.BlockStmt_List].
+// For example, given "for S1; ; S2 {}", the predecessor of S2 is S1,
+// even though they are not executed in sequence.
+func (c Cursor) Children() iter.Seq[Cursor] {
+	return func(yield func(Cursor) bool) {
+		c, ok := c.FirstChild()
+		for ok && yield(c) {
+			c, ok = c.NextSibling()
+		}
+	}
+}
+
+// Contains reports whether c contains or is equal to c2.
+//
+// Both Cursors must belong to the same [Inspector];
+// neither may be its Root node.
+func (c Cursor) Contains(c2 Cursor) bool {
+	if c.in != c2.in {
+		panic("different inspectors")
+	}
+	events := c.in.events
+	return c.index <= c2.index && events[c2.index].index <= events[c.index].index
+}
+
+// FindNode returns the cursor for node n if it belongs to the subtree
+// rooted at c. It returns zero if n is not found.
+func (c Cursor) FindNode(n ast.Node) (Cursor, bool) {
+
+	// FindNode is equivalent to this code,
+	// but more convenient and 15-20% faster:
+	if false {
+		for candidate := range c.Preorder(n) {
+			if candidate.Node() == n {
+				return candidate, true
+			}
+		}
+		return Cursor{}, false
+	}
+
+	// TODO(adonovan): opt: should we assume Node.Pos is accurate
+	// and combine type-based filtering with position filtering
+	// like FindByPos?
+
+	mask := maskOf([]ast.Node{n})
+	events := c.in.events
+
+	for i, limit := c.indices(); i < limit; i++ {
+		ev := events[i]
+		if ev.index > i { // push?
+			if ev.typ&mask != 0 && ev.node == n {
+				return Cursor{c.in, i}, true
+			}
+			pop := ev.index
+			if events[pop].typ&mask == 0 {
+				// Subtree does not contain type of n: skip.
+				i = pop
+			}
+		}
+	}
+	return Cursor{}, false
+}
+
+// FindByPos returns the cursor for the innermost node n in the tree
+// rooted at c such that n.Pos() <= start && end <= n.End().
+// (For an *ast.File, it uses the bounds n.FileStart-n.FileEnd.)
+//
+// It returns zero if none is found.
+// Precondition: start <= end.
+//
+// See also [astutil.PathEnclosingInterval], which
+// tolerates adjoining whitespace.
+func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) {
+	if end < start {
+		panic("end < start")
+	}
+	events := c.in.events
+
+	// This algorithm could be implemented using c.Inspect,
+	// but it is about 2.5x slower.
+
+	best := int32(-1) // push index of latest (=innermost) node containing range
+	for i, limit := c.indices(); i < limit; i++ {
+		ev := events[i]
+		if ev.index > i { // push?
+			n := ev.node
+			var nodeEnd token.Pos
+			if file, ok := n.(*ast.File); ok {
+				nodeEnd = file.FileEnd
+				// Note: files may be out of Pos order.
+				if file.FileStart > start {
+					i = ev.index // disjoint, after; skip to next file
+					continue
+				}
+			} else {
+				nodeEnd = n.End()
+				if n.Pos() > start {
+					break // disjoint, after; stop
+				}
+			}
+			// Inv: node.{Pos,FileStart} <= start
+			if end <= nodeEnd {
+				// node fully contains target range
+				best = i
+			} else if nodeEnd < start {
+				i = ev.index // disjoint, before; skip forward
+			}
+		}
+	}
+	if best >= 0 {
+		return Cursor{c.in, best}, true
+	}
+	return Cursor{}, false
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
index 674490a65b..a703cdfcf9 100644
--- a/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
+++ b/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
@@ -13,10 +13,19 @@
 // This representation is sometimes called a "balanced parenthesis tree."
 //
 // Experiments suggest the inspector's traversals are about 2.5x faster
-// than ast.Inspect, but it may take around 5 traversals for this
+// than [ast.Inspect], but it may take around 5 traversals for this
 // benefit to amortize the inspector's construction cost.
 // If efficiency is the primary concern, do not use Inspector for
 // one-off traversals.
+//
+// The [Cursor] type provides a more flexible API for efficient
+// navigation of syntax trees in all four "cardinal directions". For
+// example, traversals may be nested, so you can find each node of
+// type A and then search within it for nodes of type B. Or you can
+// traverse from a node to its immediate neighbors: its parent, its
+// previous and next sibling, or its first and last child. We
+// recommend using methods of Cursor in preference to Inspector where
+// possible.
 package inspector
 
 // There are four orthogonal features in a traversal:
@@ -37,9 +46,8 @@ package inspector
 
 import (
 	"go/ast"
-	_ "unsafe"
 
-	"golang.org/x/tools/internal/astutil/edge"
+	"golang.org/x/tools/go/ast/edge"
 )
 
 // An Inspector provides methods for inspecting
@@ -48,18 +56,12 @@ type Inspector struct {
 	events []event
 }
 
-//go:linkname events golang.org/x/tools/go/ast/inspector.events
-func events(in *Inspector) []event { return in.events }
-
-//go:linkname packEdgeKindAndIndex golang.org/x/tools/go/ast/inspector.packEdgeKindAndIndex
 func packEdgeKindAndIndex(ek edge.Kind, index int) int32 {
 	return int32(uint32(index+1)<<7 | uint32(ek))
 }
 
 // unpackEdgeKindAndIndex unpacks the edge kind and edge index (within
 // an []ast.Node slice) from the parent field of a pop event.
-//
-//go:linkname unpackEdgeKindAndIndex golang.org/x/tools/go/ast/inspector.unpackEdgeKindAndIndex
 func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) {
 	// The "parent" field of a pop node holds the
 	// edge Kind in the lower 7 bits and the index+1
@@ -83,15 +85,21 @@ type event struct {
 
 // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer).
 // Type can be recovered from the sole bit in typ.
+// [Tried this, wasn't faster. --adonovan]
 
 // Preorder visits all the nodes of the files supplied to New in
 // depth-first order. It calls f(n) for each node n before it visits
 // n's children.
 //
-// The complete traversal sequence is determined by ast.Inspect.
+// The complete traversal sequence is determined by [ast.Inspect].
 // The types argument, if non-empty, enables type-based filtering of
 // events. The function f is called only for nodes whose type
 // matches an element of the types slice.
+//
+// The [Cursor.Preorder] method provides a richer alternative interface.
+// Example:
+//
+//	for c := range in.Root().Preorder(types) { ... }
 func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
 	// Because it avoids postorder calls to f, and the pruning
 	// check, Preorder is almost twice as fast as Nodes. The two
@@ -131,10 +139,18 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
 // of the non-nil children of the node, followed by a call of
 // f(n, false).
 //
-// The complete traversal sequence is determined by ast.Inspect.
+// The complete traversal sequence is determined by [ast.Inspect].
 // The types argument, if non-empty, enables type-based filtering of
 // events. The function f if is called only for nodes whose type
 // matches an element of the types slice.
+//
+// The [Cursor.Inspect] method provides a richer alternative interface.
+// Example:
+//
+//	in.Root().Inspect(types, func(c Cursor) bool {
+//		...
+//		return true
+//	})
 func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
 	mask := maskOf(types)
 	for i := int32(0); i < int32(len(in.events)); {
@@ -168,6 +184,15 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc
 // supplies each call to f an additional argument, the current
 // traversal stack. The stack's first element is the outermost node,
 // an *ast.File; its last is the innermost, n.
+//
+// The [Cursor.Inspect] method provides a richer alternative interface.
+// Example:
+//
+//	in.Root().Inspect(types, func(c Cursor) bool {
+//		stack := slices.Collect(c.Enclosing())
+//		...
+//		return true
+//	})
 func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
 	mask := maskOf(types)
 	var stack []ast.Node
@@ -233,7 +258,7 @@ type visitor struct {
 type item struct {
 	index            int32  // index of current node's push event
 	parentIndex      int32  // index of parent node's push event
-	typAccum         uint64 // accumulated type bits of current node's descendents
+	typAccum         uint64 // accumulated type bits of current node's descendants
 	edgeKindAndIndex int32  // edge.Kind and index, bit packed
 }
diff --git a/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
index e936c67c98..9852331a3d 100644
--- a/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
+++ b/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -12,8 +12,6 @@ package inspector
 import (
 	"go/ast"
 	"math"
-
-	_ "unsafe"
 )
 
 const (
@@ -217,7 +215,6 @@ func typeOf(n ast.Node) uint64 {
 	return 0
 }
 
-//go:linkname maskOf golang.org/x/tools/go/ast/inspector.maskOf
 func maskOf(nodes []ast.Node) uint64 {
 	if len(nodes) == 0 {
 		return math.MaxUint64 // match all node types
diff --git a/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/walk.go
index 5a42174a0a..5f1c93c8a7 100644
--- a/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/walk.go
+++ b/hack/tools/vendor/golang.org/x/tools/go/ast/inspector/walk.go
@@ -13,7 +13,7 @@ import (
 	"fmt"
 	"go/ast"
 
-	"golang.org/x/tools/internal/astutil/edge"
+	"golang.org/x/tools/go/ast/edge"
 )
 
 func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) {
diff --git a/hack/tools/vendor/golang.org/x/tools/go/packages/doc.go b/hack/tools/vendor/golang.org/x/tools/go/packages/doc.go
index f1931d10ee..366aab6b2c 100644
--- a/hack/tools/vendor/golang.org/x/tools/go/packages/doc.go
+++ b/hack/tools/vendor/golang.org/x/tools/go/packages/doc.go
@@ -76,6 +76,8 @@ uninterpreted to Load, so that it can interpret them according to the
 conventions of the underlying build system.
 
 See the Example function for typical usage.
+See also [golang.org/x/tools/go/packages/internal/linecount]
+for an example application.
 
 # The driver protocol
diff --git a/hack/tools/vendor/golang.org/x/tools/go/packages/golist.go b/hack/tools/vendor/golang.org/x/tools/go/packages/golist.go
index 96e43cd809..89f89dd2dc 100644
--- a/hack/tools/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/hack/tools/vendor/golang.org/x/tools/go/packages/golist.go
@@ -224,13 +224,22 @@ extractQueries:
 	return response.dr, nil
 }
 
+// abs returns an absolute representation of path, based on cfg.Dir.
+func (cfg *Config) abs(path string) (string, error) {
+	if filepath.IsAbs(path) {
+		return path, nil
+	}
+	// In case cfg.Dir is relative, pass it to filepath.Abs.
+	return filepath.Abs(filepath.Join(cfg.Dir, path))
+}
+
 func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error {
 	for _, query := range queries {
 		// TODO(matloob): Do only one query per directory.
 		fdir := filepath.Dir(query)
 		// Pass absolute path of directory to go list so that it knows to treat it as a directory,
 		// not a package path.
-		pattern, err := filepath.Abs(fdir)
+		pattern, err := state.cfg.abs(fdir)
 		if err != nil {
 			return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
 		}
@@ -703,9 +712,8 @@ func (state *golistState) getGoVersion() (int, error) {
 // getPkgPath finds the package path of a directory if it's relative to a root
 // directory.
 func (state *golistState) getPkgPath(dir string) (string, bool, error) {
-	absDir, err := filepath.Abs(dir)
-	if err != nil {
-		return "", false, err
+	if !filepath.IsAbs(dir) {
+		panic("non-absolute dir passed to getPkgPath")
 	}
 	roots, err := state.determineRootDirs()
 	if err != nil {
@@ -715,7 +723,7 @@ func (state *golistState) getPkgPath(dir string) (string, bool, error) {
 	for rdir, rpath := range roots {
 		// Make sure that the directory is in the module,
 		// to avoid creating a path relative to another module.
-		if !strings.HasPrefix(absDir, rdir) {
+		if !strings.HasPrefix(dir, rdir) {
 			continue
 		}
 		// TODO(matloob): This doesn't properly handle symlinks.
diff --git a/hack/tools/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/hack/tools/vendor/golang.org/x/tools/go/packages/golist_overlay.go
index d823c474ad..d9d5a45cd4 100644
--- a/hack/tools/vendor/golang.org/x/tools/go/packages/golist_overlay.go
+++ b/hack/tools/vendor/golang.org/x/tools/go/packages/golist_overlay.go
@@ -55,7 +55,7 @@ func (state *golistState) determineRootDirsModules() (map[string]string, error)
 	}
 	if mod.Dir != "" && mod.Path != "" {
 		// This is a valid module; add it to the map.
-		absDir, err := filepath.Abs(mod.Dir)
+		absDir, err := state.cfg.abs(mod.Dir)
 		if err != nil {
 			return nil, err
 		}
diff --git a/hack/tools/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/hack/tools/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
index 16ed3c1780..d3c2913bef 100644
--- a/hack/tools/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ b/hack/tools/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -603,7 +603,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
 	type hasTypeParams interface {
 		TypeParams() *types.TypeParamList
 	}
-	// abstraction of *types.{Named,TypeParam}
+	// abstraction of *types.{Alias,Named,TypeParam}
 	type hasObj interface {
 		Obj() *types.TypeName
 	}
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/imports/fix.go b/hack/tools/vendor/golang.org/x/tools/internal/imports/fix.go
index 89b96381cd..50b6ca51a6 100644
--- a/hack/tools/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/hack/tools/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -27,12 +27,13 @@ import (
 	"unicode"
 	"unicode/utf8"
 
+	"maps"
+
 	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/gopathwalk"
 	"golang.org/x/tools/internal/stdlib"
-	"maps"
 )
 
 // importToGroup is a list of functions which map from an import path to
@@ -290,8 +291,8 @@ func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) erro
 	return nil
 }
 
-// if there is a trailing major version, remove it
-func withoutVersion(nm string) string {
+// WithoutVersion removes a trailing major version, if there is one.
+func WithoutVersion(nm string) string {
 	if v := path.Base(nm); len(v) > 0 && v[0] == 'v' {
 		if _, err := strconv.Atoi(v[1:]); err == nil {
 			// this is, for instance, called with rand/v2 and returns rand
@@ -313,7 +314,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
 	}
 	known := p.knownPackages[imp.ImportPath]
 	if known != nil && known.Name != "" {
-		return withoutVersion(known.Name)
+		return WithoutVersion(known.Name)
 	}
 	return ImportPathToAssumedName(imp.ImportPath)
 }
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/imports/imports.go b/hack/tools/vendor/golang.org/x/tools/internal/imports/imports.go
index 2215a12880..b5f5218b5c 100644
--- a/hack/tools/vendor/golang.org/x/tools/internal/imports/imports.go
+++ b/hack/tools/vendor/golang.org/x/tools/internal/imports/imports.go
@@ -93,7 +93,7 @@ func FixImports(ctx context.Context, filename string, src []byte, goroot string,
 // env is needed.
 func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) {
 	// Don't use parse() -- we don't care about fragments or statement lists
-	// here, and we need to work with unparseable files.
+	// here, and we need to work with unparsable files.
 	fileSet := token.NewFileSet()
 	parserMode := parser.SkipObjectResolution
 	if opt.Comments {
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/hack/tools/vendor/golang.org/x/tools/internal/imports/source_modindex.go
index 05229f06ce..ca745d4a1b 100644
--- a/hack/tools/vendor/golang.org/x/tools/internal/imports/source_modindex.go
+++ b/hack/tools/vendor/golang.org/x/tools/internal/imports/source_modindex.go
@@ -15,6 +15,10 @@ import (
 // This code is here rather than in the modindex package
 // to avoid import loops
 
+// TODO(adonovan): this code is only used by a test in this package.
+// Can we delete it? Or is there a plan to call NewIndexSource from
+// cmd/goimports?
+
 // implements Source using modindex, so only for module cache.
 //
 // this is perhaps over-engineered. A new Index is read at first use.
@@ -22,8 +26,8 @@ import (
 // is read if the index changed. It is not clear the Mutex is needed.
 type IndexSource struct {
 	modcachedir string
-	mutex       sync.Mutex
-	ix          *modindex.Index
+	mu          sync.Mutex
+	index       *modindex.Index // (access via getIndex)
 	expires     time.Time
 }
 
@@ -39,13 +43,14 @@ func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths
 }
 
 func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
-	if err := s.maybeReadIndex(); err != nil {
+	index, err := s.getIndex()
+	if err != nil {
 		return nil, err
 	}
 	var cs []modindex.Candidate
 	for pkg, nms := range missing {
 		for nm := range nms {
-			x := s.ix.Lookup(pkg, nm, false)
+			x := index.Lookup(pkg, nm, false)
 			cs = append(cs, x...)
 		}
 	}
@@ -74,30 +79,22 @@ func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, mi
 	return ans, nil
 }
 
-func (s *IndexSource) maybeReadIndex() error {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-
-	var readIndex bool
-	if time.Now().After(s.expires) {
-		ok, err := modindex.Update(s.modcachedir)
-		if err != nil {
-			return err
-		}
-		if ok {
-			readIndex = true
-		}
-	}
+func (s *IndexSource) getIndex() (*modindex.Index, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
 
-	if readIndex || s.ix == nil {
-		ix, err := modindex.ReadIndex(s.modcachedir)
+	// (s.index = nil => s.expires is zero,
+	// so the first condition is strictly redundant.
+	// But it makes the postcondition very clear.)
+	if s.index == nil || time.Now().After(s.expires) {
+		index, err := modindex.Update(s.modcachedir)
 		if err != nil {
-			return err
+			return nil, err
 		}
-		s.ix = ix
-		// for now refresh every 15 minutes
-		s.expires = time.Now().Add(time.Minute * 15)
+		s.index = index
+		s.expires = index.ValidAt.Add(15 * time.Minute) // (refresh period)
 	}
+	// Inv: s.index != nil
 
-	return nil
+	return s.index, nil
 }
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/modindex/directories.go b/hack/tools/vendor/golang.org/x/tools/internal/modindex/directories.go
index 1e1a02f239..9a963744b5 100644
--- a/hack/tools/vendor/golang.org/x/tools/internal/modindex/directories.go
+++ b/hack/tools/vendor/golang.org/x/tools/internal/modindex/directories.go
@@ -10,7 +10,6 @@ import (
 	"os"
 	"path/filepath"
 	"regexp"
-	"slices"
 	"strings"
 	"sync"
 	"time"
@@ -20,50 +19,48 @@ import (
 )
 
 type directory struct {
-	path       Relpath
+	path       string // relative to GOMODCACHE
 	importPath string
 	version    string // semantic version
-	syms       []symbol
 }
 
-// filterDirs groups the directories by import path,
-// sorting the ones with the same import path by semantic version,
-// most recent first.
-func byImportPath(dirs []Relpath) (map[string][]*directory, error) {
-	ans := make(map[string][]*directory) // key is import path
-	for _, d := range dirs {
-		ip, sv, err := DirToImportPathVersion(d)
+// bestDirByImportPath returns the best directory for each import
+// path, where "best" means most recent semantic version. These import
+// paths are inferred from the GOMODCACHE-relative dir names in dirs.
+func bestDirByImportPath(dirs []string) (map[string]directory, error) {
+	dirsByPath := make(map[string]directory)
+	for _, dir := range dirs {
+		importPath, version, err := dirToImportPathVersion(dir)
 		if err != nil {
 			return nil, err
 		}
-		ans[ip] = append(ans[ip], &directory{
-			path:       d,
-			importPath: ip,
-			version:    sv,
-		})
-	}
-	for k, v := range ans {
-		semanticSort(v)
-		ans[k] = v
+		new := directory{
+			path:       dir,
+			importPath: importPath,
+			version:    version,
+		}
+		if old, ok := dirsByPath[importPath]; !ok || compareDirectory(new, old) < 0 {
+			dirsByPath[importPath] = new
+		}
 	}
-	return ans, nil
+	return dirsByPath, nil
 }
 
-// sort the directories by semantic version, latest first
-func semanticSort(v []*directory) {
-	slices.SortFunc(v, func(l, r *directory) int {
-		if n := semver.Compare(l.version, r.version); n != 0 {
-			return -n // latest first
-		}
-		return strings.Compare(string(l.path), string(r.path))
-	})
+// compareDirectory defines an ordering of path@version directories,
+// by descending version, then by ascending path.
+func compareDirectory(x, y directory) int {
+	if sign := -semver.Compare(x.version, y.version); sign != 0 {
+		return sign // latest first
+	}
+	return strings.Compare(string(x.path), string(y.path))
+}
 
 // modCacheRegexp splits a relpathpath into module, module version, and package.
 var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
 
-// DirToImportPathVersion computes import path and semantic version
-func DirToImportPathVersion(dir Relpath) (string, string, error) {
+// dirToImportPathVersion computes import path and semantic version
+// from a GOMODCACHE-relative directory name.
+func dirToImportPathVersion(dir string) (string, string, error) {
 	m := modCacheRegexp.FindStringSubmatch(string(dir))
 	// m[1] is the module path
 	// m[2] is the version major.minor.patch(-
 that contains the name
+// Package modindex contains code for building and searching an
+// [Index] of the Go module cache.
+package modindex
+
+// The directory containing the index, returned by
+// [IndexDir], contains a file index-name-<ver> that contains the name
 // of the current index. We believe writing that short file is atomic.
-// ReadIndex reads that file to get the file name of the index.
+// [Read] reads that file to get the file name of the index.
 // WriteIndex writes an index with a unique name and then
 // writes that name into a new version of index-name-<ver>.
 // (<ver> stands for the CurrentVersion of the index format.)
-package modindex
 
 import (
+	"maps"
+	"os"
 	"path/filepath"
 	"slices"
 	"strings"
@@ -21,144 +25,95 @@ import (
 	"golang.org/x/mod/semver"
 )
 
-// Create always creates a new index for the go module cache that is in cachedir.
-func Create(cachedir string) error {
-	_, err := indexModCache(cachedir, true)
-	return err
-}
-
-// Update the index for the go module cache that is in cachedir,
-// If there is no existing index it will build one.
-// If there are changed directories since the last index, it will
-// write a new one and return true. Otherwise it returns false.
-func Update(cachedir string) (bool, error) {
-	return indexModCache(cachedir, false)
+// Update updates the index for the specified Go
+// module cache directory, creating it as needed.
+// On success it returns the current index.
+func Update(gomodcache string) (*Index, error) {
+	prev, err := Read(gomodcache)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return nil, err
+		}
+		prev = nil
+	}
+	return update(gomodcache, prev)
 }
 
-// indexModCache writes an index current as of when it is called.
-// If clear is true the index is constructed from all of GOMODCACHE
-// otherwise the index is constructed from the last previous index
-// and the updates to the cache. It returns true if it wrote an index,
-// false otherwise.
-func indexModCache(cachedir string, clear bool) (bool, error) {
-	cachedir, err := filepath.Abs(cachedir)
+// update builds, writes, and returns the current index.
+//
+// If old is nil, the new index is built from all of GOMODCACHE;
+// otherwise it is built from the old index plus cache updates
+// since the previous index's time.
+func update(gomodcache string, old *Index) (*Index, error) {
+	gomodcache, err := filepath.Abs(gomodcache)
 	if err != nil {
-		return false, err
+		return nil, err
 	}
-	cd := Abspath(cachedir)
-	future := time.Now().Add(24 * time.Hour) // safely in the future
-	ok, err := modindexTimed(future, cd, clear)
+	new, changed, err := build(gomodcache, old)
 	if err != nil {
-		return false, err
+		return nil, err
 	}
-	return ok, nil
-}
-
-// modindexTimed writes an index current as of onlyBefore.
-// If clear is true the index is constructed from all of GOMODCACHE
-// otherwise the index is constructed from the last previous index
-// and all the updates to the cache before onlyBefore.
-// It returns true if it wrote a new index, false if it wrote nothing.
-func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
-	var curIndex *Index
-	if !clear {
-		var err error
-		curIndex, err = ReadIndex(string(cachedir))
-		if clear && err != nil {
-			return false, err
+	if old == nil || changed {
+		if err := write(gomodcache, new); err != nil {
+			return nil, err
 		}
-		// TODO(pjw): check that most of those directories still exist
-	}
-	cfg := &work{
-		onlyBefore: onlyBefore,
-		oldIndex:   curIndex,
-		cacheDir:   cachedir,
-	}
-	if curIndex != nil {
-		cfg.onlyAfter = curIndex.Changed
-	}
-	if err := cfg.buildIndex(); err != nil {
-		return false, err
 	}
-	if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
-		// no changes from existing curIndex, don't write a new index
-		return false, nil
-	}
-	if err := cfg.writeIndex(); err != nil {
-		return false, err
-	}
-	return true, nil
-}
-
-type work struct {
-	onlyBefore time.Time // do not use directories later than this
-	onlyAfter  time.Time // only interested in directories after this
-	// directories from before onlyAfter come from oldIndex
-	oldIndex *Index
-	newIndex *Index
-	cacheDir Abspath
+	return new, nil
 }
 
-func (w *work) buildIndex() error {
-	// The effective date of the new index should be at least
-	// slightly earlier than when the directories are scanned
-	// so set it now.
-	w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
-	dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
-	if len(dirs) == 0 {
-		return nil
+// build returns a new index for the specified Go module cache (an
+// absolute path).
+//
+// If an old index is provided, only directories more recent than it
+// are scanned; older directories are provided by the old
+// Index.
+//
+// The boolean result indicates whether new entries were found.
+func build(gomodcache string, old *Index) (*Index, bool, error) {
+	// Set the time window.
+	var start time.Time // = dawn of time
+	if old != nil {
+		start = old.ValidAt
 	}
-	newdirs, err := byImportPath(dirs)
+	now := time.Now()
+	end := now.Add(24 * time.Hour) // safely in the future
+
+	// Enumerate GOMODCACHE package directories.
+	// Choose the best (latest) package for each import path.
+	pkgDirs := findDirs(gomodcache, start, end)
+	dirByPath, err := bestDirByImportPath(pkgDirs)
 	if err != nil {
-		return err
+		return nil, false, err
 	}
-	// for each import path it might occur only in newdirs,
-	// only in w.oldIndex, or in both.
-	// If it occurs in both, use the semantically later one
-	if w.oldIndex != nil {
-		for _, e := range w.oldIndex.Entries {
-			found, ok := newdirs[e.ImportPath]
-			if !ok {
-				w.newIndex.Entries = append(w.newIndex.Entries, e)
-				continue // use this one, there is no new one
-			}
-			if semver.Compare(found[0].version, e.Version) > 0 {
-				// use the new one
-			} else {
-				// use the old one, forget the new one
-				w.newIndex.Entries = append(w.newIndex.Entries, e)
-				delete(newdirs, e.ImportPath)
+
+	// Each import path might occur only in
+	// dirByPath, only in old, or in both.
+	// If both, use the semantically later one.
+	var entries []Entry
+	if old != nil {
+		for _, entry := range old.Entries {
+			dir, ok := dirByPath[entry.ImportPath]
+			if !ok || semver.Compare(dir.version, entry.Version) <= 0 {
+				// New dir is missing or not more recent; use old entry.
+				entries = append(entries, entry)
+				delete(dirByPath, entry.ImportPath)
 			}
 		}
 	}
-	// get symbol information for all the new diredtories
-	getSymbols(w.cacheDir, newdirs)
-	// assemble the new index entries
-	for k, v := range newdirs {
-		d := v[0]
-		pkg, names := processSyms(d.syms)
-		if pkg == "" {
-			continue // PJW: does this ever happen?
-		}
-		entry := Entry{
-			PkgName:    pkg,
-			Dir:        d.path,
-			ImportPath: k,
-			Version:    d.version,
-			Names:      names,
-		}
-		w.newIndex.Entries = append(w.newIndex.Entries, entry)
-	}
-	// sort the entries in the new index
-	slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
-		if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
+
+	// Extract symbol information for all the new directories.
+	newEntries := extractSymbols(gomodcache, maps.Values(dirByPath))
+	entries = append(entries, newEntries...)
+	slices.SortFunc(entries, func(x, y Entry) int {
+		if n := strings.Compare(x.PkgName, y.PkgName); n != 0 {
 			return n
 		}
-		return strings.Compare(l.ImportPath, r.ImportPath)
+		return strings.Compare(x.ImportPath, y.ImportPath)
 	})
-	return nil
-}
 
-func (w *work) writeIndex() error {
-	return writeIndex(w.cacheDir, w.newIndex)
+	return &Index{
+		GOMODCACHE: gomodcache,
+		ValidAt:    now, // time before the directories were scanned
+		Entries:    entries,
+	}, len(newEntries) > 0, nil
 }
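
The merge loop above keeps an old index entry unless the module cache now holds a strictly newer semantic version of the same import path, and bestDirByImportPath (in directories.go) applies the same "latest version wins" rule when first scanning the cache. A minimal, self-contained sketch of that selection rule; the dir type, better helper, and sample data are invented for illustration, and only golang.org/x/mod/semver is a real dependency:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// dir is a stand-in for the unexported directory type in the diff.
type dir struct{ path, importPath, version string }

// better reports whether x should replace y for the same import path:
// later semantic version first, then lexically smaller path, matching
// the ordering that compareDirectory establishes.
func better(x, y dir) bool {
	if c := semver.Compare(x.version, y.version); c != 0 {
		return c > 0
	}
	return x.path < y.path
}

func main() {
	dirs := []dir{
		{"rsc.io/quote@v1.4.0", "rsc.io/quote", "v1.4.0"},
		{"rsc.io/quote@v1.5.2", "rsc.io/quote", "v1.5.2"},
	}
	// Keep only the best directory per import path.
	best := map[string]dir{}
	for _, d := range dirs {
		if old, ok := best[d.importPath]; !ok || better(d, old) {
			best[d.importPath] = d
		}
	}
	fmt.Println(best["rsc.io/quote"].path) // rsc.io/quote@v1.5.2
}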
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/modindex/symbols.go b/hack/tools/vendor/golang.org/x/tools/internal/modindex/symbols.go
index b918529d43..fe24db9b13 100644
--- a/hack/tools/vendor/golang.org/x/tools/internal/modindex/symbols.go
+++ b/hack/tools/vendor/golang.org/x/tools/internal/modindex/symbols.go
@@ -10,11 +10,13 @@ import (
 	"go/parser"
 	"go/token"
 	"go/types"
+	"iter"
 	"os"
 	"path/filepath"
 	"runtime"
 	"slices"
 	"strings"
+	"sync"
 
 	"golang.org/x/sync/errgroup"
 )
@@ -30,45 +32,69 @@ import (
 type symbol struct {
 	pkg  string // name of the symbols's package
 	name string // declared name
-	kind string // T, C, V, or F, follwed by D if deprecated
+	kind string // T, C, V, or F, followed by D if deprecated
 	sig  string // signature information, for F
 }
 
-// find the symbols for the best directories
-func getSymbols(cd Abspath, dirs map[string][]*directory) {
+// extractSymbols returns a (new, unordered) array of Entries, one for
+// each provided package directory, describing its exported symbols.
+func extractSymbols(cwd string, dirs iter.Seq[directory]) []Entry {
+	var (
+		mu      sync.Mutex
+		entries []Entry
+	)
+
 	var g errgroup.Group
 	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
-	for _, vv := range dirs {
-		// throttling some day?
-		d := vv[0]
+	for dir := range dirs {
 		g.Go(func() error {
-			thedir := filepath.Join(string(cd), string(d.path))
+			thedir := filepath.Join(cwd, string(dir.path))
 			mode := parser.SkipObjectResolution | parser.ParseComments
 
-			fi, err := os.ReadDir(thedir)
+			// Parse all Go files in dir and extract symbols.
+			dirents, err := os.ReadDir(thedir)
 			if err != nil {
 				return nil // log this someday?
 			}
-			for _, fx := range fi {
-				if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
+			var syms []symbol
+			for _, dirent := range dirents {
+				if !strings.HasSuffix(dirent.Name(), ".go") ||
+					strings.HasSuffix(dirent.Name(), "_test.go") {
 					continue
 				}
-				fname := filepath.Join(thedir, fx.Name())
+				fname := filepath.Join(thedir, dirent.Name())
 				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
 				if err != nil {
 					continue // ignore errors, someday log them?
 				}
-				d.syms = append(d.syms, getFileExports(tr)...)
+				syms = append(syms, getFileExports(tr)...)
+			}
+
+			// Create an entry for the package.
+			pkg, names := processSyms(syms)
+			if pkg != "" {
+				mu.Lock()
+				defer mu.Unlock()
+				entries = append(entries, Entry{
+					PkgName:    pkg,
+					Dir:        dir.path,
+					ImportPath: dir.importPath,
+					Version:    dir.version,
+					Names:      names,
+				})
 			}
+
 			return nil
 		})
 	}
-	g.Wait()
+	g.Wait() // ignore error
+
+	return entries
 }
 
 func getFileExports(f *ast.File) []symbol {
 	pkg := f.Name.Name
-	if pkg == "main" {
+	if pkg == "main" || pkg == "" {
 		return nil
 	}
 	var ans []symbol
@@ -110,7 +136,7 @@ func getFileExports(f *ast.File) []symbol {
 				// The only place a $ can occur seems to be in a struct tag, which
 				// can be an arbitrary string literal, and ExprString does not presently
 				// print struct tags. So for this to happen the type of a formal parameter
-				// has to be a explict struct, e.g. foo(x struct{a int "$"}) and ExprString
+				// has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
 				// would have to show the struct tag. Even testing for this case seems
 				// a waste of effort, but let's remember the possibility
 				if strings.Contains(tp, "$") {
@@ -202,17 +228,18 @@ func processSyms(syms []symbol) (string, []string) {
 	pkg := syms[0].pkg
 	var names []string
 	for _, s := range syms {
+		if s.pkg != pkg {
+			// Symbols came from two files in the same dir
+			// with different package declarations.
+			continue
+		}
 		var nx string
-		if s.pkg == pkg {
-			if s.sig != "" {
-				nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
-			} else {
-				nx = fmt.Sprintf("%s %s", s.name, s.kind)
-			}
-			names = append(names, nx)
+		if s.sig != "" {
+			nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
 		} else {
-			continue // PJW: do we want to keep track of these?
+			nx = fmt.Sprintf("%s %s", s.name, s.kind)
 		}
+		names = append(names, nx)
 	}
 	return pkg, names
 }
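
extractSymbols now fans out one goroutine per directory through an errgroup, bounded by SetLimit, and serializes appends with a mutex; the caller hands it an iter.Seq built with maps.Values. A minimal sketch of that shape under stand-in types (dir, extract, and the ToUpper body are placeholders for the real per-directory parsing):

package main

import (
	"fmt"
	"iter"
	"maps"
	"runtime"
	"strings"
	"sync"

	"golang.org/x/sync/errgroup"
)

type dir struct{ path string }

// extract mirrors extractSymbols' concurrency pattern: one goroutine per
// directory, bounded by SetLimit, with appends serialized by a mutex.
func extract(dirs iter.Seq[dir]) []string {
	var (
		mu  sync.Mutex
		out []string
	)
	var g errgroup.Group
	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
	for d := range dirs {
		g.Go(func() error {
			name := strings.ToUpper(d.path) // stand-in for parsing Go files
			mu.Lock()
			defer mu.Unlock()
			out = append(out, name)
			return nil
		})
	}
	g.Wait() // errors are ignored, as in the patch
	return out
}

func main() {
	dirByPath := map[string]dir{"a": {"a"}, "b": {"b"}}
	fmt.Println(extract(maps.Values(dirByPath)))
}
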
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/modindex/types.go b/hack/tools/vendor/golang.org/x/tools/internal/modindex/types.go
deleted file mode 100644
index ece4488630..0000000000
--- a/hack/tools/vendor/golang.org/x/tools/internal/modindex/types.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modindex
-
-import (
-	"strings"
-)
-
-// some special types to avoid confusions
-
-// distinguish various types of directory names. It's easy to get confused.
-type Abspath string // absolute paths
-type Relpath string // paths with GOMODCACHE prefix removed
-
-func toRelpath(cachedir Abspath, s string) Relpath {
-	if strings.HasPrefix(s, string(cachedir)) {
-		if s == string(cachedir) {
-			return Relpath("")
-		}
-		return Relpath(s[len(cachedir)+1:])
-	}
-	return Relpath(s)
-}
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/hack/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index 73eefa2a7d..929b470beb 100644
--- a/hack/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/hack/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -5,6 +5,8 @@
 // Package packagesinternal exposes internal-only fields from go/packages.
 package packagesinternal
 
+import "fmt"
+
 var GetDepsErrors = func(p any) []*PackageError { return nil }
 
 type PackageError struct {
@@ -13,5 +15,9 @@ type PackageError struct {
 	Err         string   // the error itself
 }
 
+func (err PackageError) String() string {
+	return fmt.Sprintf("%s: %s (import stack: %s)", err.Pos, err.Err, err.ImportStack)
+}
+
 var TypecheckCgo int
 var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
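
The new String method is a plain fmt.Stringer over the three fields. Since packagesinternal cannot be imported from outside x/tools, the sketch below replicates the type locally just to show the rendered shape; all values are made up:

package main

import "fmt"

// Local copy of the internal PackageError, for illustration only;
// the real type lives in an internal package.
type PackageError struct {
	ImportStack []string
	Pos         string
	Err         string
}

func (err PackageError) String() string {
	return fmt.Sprintf("%s: %s (import stack: %s)", err.Pos, err.Err, err.ImportStack)
}

func main() {
	e := PackageError{ImportStack: []string{"a", "b"}, Pos: "b/b.go:3:8", Err: "undefined: c"}
	fmt.Println(e) // b/b.go:3:8: undefined: c (import stack: [a b])
}
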
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/hack/tools/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
index 649c82b6be..3db2a135b9 100644
--- a/hack/tools/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
+++ b/hack/tools/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
@@ -65,14 +65,16 @@ func ClassifyCall(info *types.Info, call *ast.CallExpr) CallKind {
 	if info.Types == nil {
 		panic("ClassifyCall: info.Types is nil")
 	}
-	if info.Types[call.Fun].IsType() {
+	tv := info.Types[call.Fun]
+	if tv.IsType() {
 		return CallConversion
 	}
+	if tv.IsBuiltin() {
+		return CallBuiltin
+	}
 	obj := info.Uses[UsedIdent(info, call.Fun)]
 	// Classify the call by the type of the object, if any.
 	switch obj := obj.(type) {
-	case *types.Builtin:
-		return CallBuiltin
 	case *types.Func:
 		if interfaceMethod(obj) {
 			return CallInterface
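
The reordering matters because a builtin can now be recognized directly from the TypeAndValue before consulting info.Uses at all. A self-contained probe of the two predicates ClassifyCall now checks first (the source snippet and names are illustrative):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

var s = []int{1, 2}
var n = len(s)     // builtin
var f = float64(n) // conversion
func g() int       { return 0 }
var m = g()        // ordinary call
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{Types: map[ast.Expr]types.TypeAndValue{}}
	if _, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			tv := info.Types[call.Fun]
			// A conversion reports IsType, a builtin reports IsBuiltin,
			// and an ordinary call reports neither.
			fmt.Printf("%-10s IsType=%-5v IsBuiltin=%v\n",
				types.ExprString(call.Fun), tv.IsType(), tv.IsBuiltin())
		}
		return true
	})
}
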
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go b/hack/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go
index cc244689ef..a5cd7e8dbf 100644
--- a/hack/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/hack/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -69,6 +69,34 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
 	}
 }
 
+// TypeNameFor returns the type name symbol for the specified type, if
+// it is a [*types.Alias], [*types.Named], [*types.TypeParam], or a
+// [*types.Basic] representing a type.
+//
+// For all other types, and for Basic types representing a builtin,
+// constant, or nil, it returns nil. Be careful not to convert the
+// resulting nil pointer to a [types.Object]!
+//
+// If t is the type of a constant, it may be an "untyped" type, which
+// has no TypeName. To access the name of such types (e.g. "untyped
+// int"), use [types.Basic.Name].
+func TypeNameFor(t types.Type) *types.TypeName {
+	switch t := t.(type) {
+	case *types.Alias:
+		return t.Obj()
+	case *types.Named:
+		return t.Obj()
+	case *types.TypeParam:
+		return t.Obj()
+	case *types.Basic:
+		// See issues #71886 and #66890 for some history.
+		if tname, ok := types.Universe.Lookup(t.Name()).(*types.TypeName); ok {
+			return tname
+		}
+	}
+	return nil
+}
+
 // A NamedOrAlias is a [types.Type] that is named (as
 // defined by the spec) and capable of bearing type parameters: it
 // abstracts aliases ([types.Alias]) and defined types
@@ -77,7 +105,7 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
 // Every type declared by an explicit "type" declaration is a
 // NamedOrAlias. (Built-in type symbols may additionally
 // have type [types.Basic], which is not a NamedOrAlias,
-// though the spec regards them as "named".)
+// though the spec regards them as "named"; see [TypeNameFor].)
 //
 // NamedOrAlias cannot expose the Origin method, because
 // [types.Alias.Origin] and [types.Named.Origin] have different
@@ -85,32 +113,15 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
 type NamedOrAlias interface {
 	types.Type
 	Obj() *types.TypeName
-	// TODO(hxjiang): add method TypeArgs() *types.TypeList after stop supporting go1.22.
-}
-
-// TypeParams is a light shim around t.TypeParams().
-// (go/types.Alias).TypeParams requires >= 1.23.
-func TypeParams(t NamedOrAlias) *types.TypeParamList {
-	switch t := t.(type) {
-	case *types.Alias:
-		return aliases.TypeParams(t)
-	case *types.Named:
-		return t.TypeParams()
-	}
-	return nil
+	TypeArgs() *types.TypeList
+	TypeParams() *types.TypeParamList
+	SetTypeParams(tparams []*types.TypeParam)
 }
 
-// TypeArgs is a light shim around t.TypeArgs().
-// (go/types.Alias).TypeArgs requires >= 1.23.
-func TypeArgs(t NamedOrAlias) *types.TypeList {
-	switch t := t.(type) {
-	case *types.Alias:
-		return aliases.TypeArgs(t)
-	case *types.Named:
-		return t.TypeArgs()
-	}
-	return nil
-}
+var (
+	_ NamedOrAlias = (*types.Alias)(nil)
+	_ NamedOrAlias = (*types.Named)(nil)
+)
 
 // Origin returns the generic type of the Named or Alias type t if it
 // is instantiated, otherwise it returns t.
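
TypeNameFor maps a type to its declaring TypeName, returning nil for untyped and unnamed cases. A small probe of the same type-switch using only the public go/types API; typeNameFor below is an illustrative local replica, not the vendored function:

package main

import (
	"fmt"
	"go/types"
)

// typeNameFor replays TypeNameFor's cases with the public go/types API.
func typeNameFor(t types.Type) *types.TypeName {
	switch t := t.(type) {
	case *types.Alias:
		return t.Obj()
	case *types.Named:
		return t.Obj()
	case *types.TypeParam:
		return t.Obj()
	case *types.Basic:
		if tname, ok := types.Universe.Lookup(t.Name()).(*types.TypeName); ok {
			return tname
		}
	}
	return nil
}

func main() {
	fmt.Println(typeNameFor(types.Typ[types.Int]) != nil)        // true: predeclared int has a TypeName
	fmt.Println(typeNameFor(types.Typ[types.UntypedInt]) != nil) // false: "untyped int" has none
	fmt.Println(typeNameFor(types.NewPointer(types.Typ[types.Int])) != nil) // false: *int is unnamed
}
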
diff --git a/hack/tools/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/hack/tools/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index e942bc983e..743bfb81d6 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -371,7 +371,31 @@ func ConsumeVarint(b []byte) (v uint64, n int) {
 func SizeVarint(v uint64) int {
 	// This computes 1 + (bits.Len64(v)-1)/7.
 	// 9/64 is a good enough approximation of 1/7
-	return int(9*uint32(bits.Len64(v))+64) / 64
+	//
+	// The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT
+	// instruction, which is very fast on CPUs from the last few years. The
+	// specific way of expressing the calculation matches C++ Protobuf, see
+	// https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang
+	// optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell).
+
+	// By OR'ing v with 1, we guarantee that v is never 0, without changing the
+	// result of SizeVarint. LZCNT is not defined for 0, meaning the compiler
+	// needs to add extra instructions to handle that case.
+	//
+	// The Go compiler currently (go1.24.4) does not make use of this knowledge.
+	// This opportunity (removing the XOR instruction, which handles the 0 case)
+	// results in a small (1%) performance win across CPU architectures.
+	//
+	// Independently of avoiding the 0 case, we need the v |= 1 line because
+	// it allows the Go compiler to eliminate an extra XCHGL barrier.
+	v |= 1
+
+	// It would be clearer to write log2value := 63 - uint32(...), but
+	// writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel).
+	// Proof of identity for our value range [0..63]:
+	// https://go.dev/play/p/Pdn9hEWYakX
+	log2value := uint32(bits.LeadingZeros64(v)) ^ 63
+	return int((log2value*9 + (64 + 9)) / 64)
 }
 
 // AppendFixed32 appends v to b as a little-endian uint32.
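
The rewritten SizeVarint trades the earlier arithmetic for an LZCNT-friendly form. A quick self-contained check that the new expression agrees with the straightforward 1 + (bits.Len64(v)-1)/7 definition across the value range (naive and fast are illustrative names, not part of the patch):

package main

import (
	"fmt"
	"math/bits"
)

// naive is the textbook definition: varints carry 7 payload bits per byte,
// so a nonzero value needs 1 + (bits.Len64(v)-1)/7 bytes, and 0 needs 1.
func naive(v uint64) int {
	if v == 0 {
		return 1
	}
	return 1 + (bits.Len64(v)-1)/7
}

// fast mirrors the patched SizeVarint.
func fast(v uint64) int {
	v |= 1 // make the input provably nonzero for LZCNT
	log2value := uint32(bits.LeadingZeros64(v)) ^ 63 // == 63 - LeadingZeros64(v)
	return int((log2value*9 + (64 + 9)) / 64)
}

func main() {
	for _, v := range []uint64{0, 1, 127, 128, 1<<14 - 1, 1 << 14, 1<<63 - 1, 1 << 63, ^uint64(0)} {
		if fast(v) != naive(v) {
			panic(fmt.Sprintf("mismatch at %d: %d vs %d", v, fast(v), naive(v)))
		}
		fmt.Printf("SizeVarint(%d) = %d\n", v, fast(v))
	}
}
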
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/hack/tools/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index 5a57ef6f3c..04696351ee 100644
Binary files a/hack/tools/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/hack/tools/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 10132c9b38..a0aad2777f 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -69,6 +69,12 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
 				parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value
 			case genid.FeatureSet_JsonFormat_field_number:
 				parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value
+			case genid.FeatureSet_EnforceNamingStyle_field_number:
+				// EnforceNamingStyle is enforced in protoc, languages other than C++
+				// are not supposed to do anything with this feature.
+			case genid.FeatureSet_DefaultSymbolVisibility_field_number:
+				// DefaultSymbolVisibility is enforced in protoc, runtimes should not
+				// inspect this value.
 			default:
 				panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num))
 			}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
new file mode 100644
index 0000000000..a12ec9791c
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
@@ -0,0 +1,33 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import "google.golang.org/protobuf/reflect/protoreflect"
+
+// UsePresenceForField reports whether the presence bitmap should be used for
+// the specified field.
+func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
+	switch {
+	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+		// Oneof fields never use the presence bitmap.
+		//
+		// Synthetic oneofs are an exception: Those are used to implement proto3
+		// optional fields and hence should follow non-oneof field semantics.
+		return false, false
+
+	case fd.IsMap():
+		// Map-typed fields never use the presence bitmap.
+		return false, false
+
+	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
+		// Lazy fields always use the presence bitmap (only messages can be lazy).
+		isLazy := fd.(interface{ IsLazy() bool }).IsLazy()
+		return isLazy, isLazy
+
+	default:
+		// If the field has presence, use the presence bitmap.
+		return fd.HasPresence(), false
+	}
+}
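
UsePresenceForField's decision can be traced with any concrete descriptor. A small sketch using descriptor.proto's own reflection (a proto2 file, so its scalar fields have explicit presence); the printed booleans walk the helper's cases by hand:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	md := (&descriptorpb.FieldDescriptorProto{}).ProtoReflect().Descriptor()
	fd := md.Fields().ByName("name")

	// Not in a oneof, not a map, not a message/group, and HasPresence is
	// true, so UsePresenceForField would report usePresence=true,
	// canBeLazy=false for this field.
	fmt.Println(fd.ContainingOneof() == nil, fd.IsMap(), fd.HasPresence())
	// Output: true false true
}
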
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/hack/tools/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
index df8f918501..3ceb6fa7f5 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
@@ -27,6 +27,7 @@ const (
 	Api_SourceContext_field_name protoreflect.Name = "source_context"
 	Api_Mixins_field_name        protoreflect.Name = "mixins"
 	Api_Syntax_field_name        protoreflect.Name = "syntax"
+	Api_Edition_field_name       protoreflect.Name = "edition"
 
 	Api_Name_field_fullname          protoreflect.FullName = "google.protobuf.Api.name"
 	Api_Methods_field_fullname       protoreflect.FullName = "google.protobuf.Api.methods"
@@ -35,6 +36,7 @@ const (
 	Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context"
 	Api_Mixins_field_fullname        protoreflect.FullName = "google.protobuf.Api.mixins"
 	Api_Syntax_field_fullname        protoreflect.FullName = "google.protobuf.Api.syntax"
+	Api_Edition_field_fullname       protoreflect.FullName = "google.protobuf.Api.edition"
 )
 
 // Field numbers for google.protobuf.Api.
@@ -46,6 +48,7 @@ const (
 	Api_SourceContext_field_number protoreflect.FieldNumber = 5
 	Api_Mixins_field_number        protoreflect.FieldNumber = 6
 	Api_Syntax_field_number        protoreflect.FieldNumber = 7
+	Api_Edition_field_number       protoreflect.FieldNumber = 8
 )
 
 // Names for google.protobuf.Method.
@@ -63,6 +66,7 @@ const (
 	Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming"
 	Method_Options_field_name           protoreflect.Name = "options"
 	Method_Syntax_field_name            protoreflect.Name = "syntax"
+	Method_Edition_field_name           protoreflect.Name = "edition"
 
 	Method_Name_field_fullname              protoreflect.FullName = "google.protobuf.Method.name"
 	Method_RequestTypeUrl_field_fullname    protoreflect.FullName = "google.protobuf.Method.request_type_url"
@@ -71,6 +75,7 @@ const (
 	Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming"
 	Method_Options_field_fullname           protoreflect.FullName = "google.protobuf.Method.options"
 	Method_Syntax_field_fullname            protoreflect.FullName = "google.protobuf.Method.syntax"
+	Method_Edition_field_fullname           protoreflect.FullName = "google.protobuf.Method.edition"
 )
 
 // Field numbers for google.protobuf.Method.
@@ -82,6 +87,7 @@ const (
 	Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5
 	Method_Options_field_number           protoreflect.FieldNumber = 6
 	Method_Syntax_field_number            protoreflect.FieldNumber = 7
+	Method_Edition_field_number           protoreflect.FieldNumber = 8
 )
 
 // Names for google.protobuf.Mixin.
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/hack/tools/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index f30ab6b586..950a6a325a 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -34,6 +34,19 @@ const (
 	Edition_EDITION_MAX_enum_value             = 2147483647
 )
 
+// Full and short names for google.protobuf.SymbolVisibility.
+const (
+	SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility"
+	SymbolVisibility_enum_name     = "SymbolVisibility"
+)
+
+// Enum values for google.protobuf.SymbolVisibility.
+const (
+	SymbolVisibility_VISIBILITY_UNSET_enum_value  = 0
+	SymbolVisibility_VISIBILITY_LOCAL_enum_value  = 1
+	SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2
+)
+
 // Names for google.protobuf.FileDescriptorSet.
 const (
 	FileDescriptorSet_message_name     protoreflect.Name     = "FileDescriptorSet"
@@ -65,6 +78,7 @@ const (
 	FileDescriptorProto_Dependency_field_name       protoreflect.Name = "dependency"
 	FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency"
 	FileDescriptorProto_WeakDependency_field_name   protoreflect.Name = "weak_dependency"
+	FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency"
 	FileDescriptorProto_MessageType_field_name      protoreflect.Name = "message_type"
 	FileDescriptorProto_EnumType_field_name         protoreflect.Name = "enum_type"
 	FileDescriptorProto_Service_field_name          protoreflect.Name = "service"
@@ -79,6 +93,7 @@ const (
 	FileDescriptorProto_Dependency_field_fullname       protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency"
 	FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency"
 	FileDescriptorProto_WeakDependency_field_fullname   protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency"
+	FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency"
 	FileDescriptorProto_MessageType_field_fullname      protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type"
 	FileDescriptorProto_EnumType_field_fullname         protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type"
 	FileDescriptorProto_Service_field_fullname          protoreflect.FullName = "google.protobuf.FileDescriptorProto.service"
@@ -96,6 +111,7 @@ const (
 	FileDescriptorProto_Dependency_field_number       protoreflect.FieldNumber = 3
 	FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10
 	FileDescriptorProto_WeakDependency_field_number   protoreflect.FieldNumber = 11
+	FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15
 	FileDescriptorProto_MessageType_field_number      protoreflect.FieldNumber = 4
 	FileDescriptorProto_EnumType_field_number         protoreflect.FieldNumber = 5
 	FileDescriptorProto_Service_field_number          protoreflect.FieldNumber = 6
@@ -124,6 +140,7 @@ const (
 	DescriptorProto_Options_field_name        protoreflect.Name = "options"
 	DescriptorProto_ReservedRange_field_name  protoreflect.Name = "reserved_range"
 	DescriptorProto_ReservedName_field_name   protoreflect.Name = "reserved_name"
+	DescriptorProto_Visibility_field_name     protoreflect.Name = "visibility"
 
 	DescriptorProto_Name_field_fullname           protoreflect.FullName = "google.protobuf.DescriptorProto.name"
 	DescriptorProto_Field_field_fullname          protoreflect.FullName = "google.protobuf.DescriptorProto.field"
@@ -135,6 +152,7 @@ const (
 	DescriptorProto_Options_field_fullname        protoreflect.FullName = "google.protobuf.DescriptorProto.options"
 	DescriptorProto_ReservedRange_field_fullname  protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range"
 	DescriptorProto_ReservedName_field_fullname   protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name"
+	DescriptorProto_Visibility_field_fullname     protoreflect.FullName = "google.protobuf.DescriptorProto.visibility"
 )
 
 // Field numbers for google.protobuf.DescriptorProto.
@@ -149,6 +167,7 @@ const (
 	DescriptorProto_Options_field_number        protoreflect.FieldNumber = 7
 	DescriptorProto_ReservedRange_field_number  protoreflect.FieldNumber = 9
 	DescriptorProto_ReservedName_field_number   protoreflect.FieldNumber = 10
+	DescriptorProto_Visibility_field_number     protoreflect.FieldNumber = 11
 )
 
 // Names for google.protobuf.DescriptorProto.ExtensionRange.
@@ -388,12 +407,14 @@ const (
 	EnumDescriptorProto_Options_field_name       protoreflect.Name = "options"
 	EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
 	EnumDescriptorProto_ReservedName_field_name  protoreflect.Name = "reserved_name"
+	EnumDescriptorProto_Visibility_field_name    protoreflect.Name = "visibility"
 
 	EnumDescriptorProto_Name_field_fullname          protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name"
 	EnumDescriptorProto_Value_field_fullname         protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value"
 	EnumDescriptorProto_Options_field_fullname       protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options"
 	EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range"
 	EnumDescriptorProto_ReservedName_field_fullname  protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name"
+	EnumDescriptorProto_Visibility_field_fullname    protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility"
 )
 
 // Field numbers for google.protobuf.EnumDescriptorProto.
@@ -403,6 +424,7 @@ const (
 	EnumDescriptorProto_Options_field_number       protoreflect.FieldNumber = 3
 	EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4
 	EnumDescriptorProto_ReservedName_field_number  protoreflect.FieldNumber = 5
+	EnumDescriptorProto_Visibility_field_number    protoreflect.FieldNumber = 6
 )
 
 // Names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
@@ -1008,29 +1030,35 @@ const (
 
 // Field names for google.protobuf.FeatureSet.
 const (
-	FeatureSet_FieldPresence_field_name         protoreflect.Name = "field_presence"
-	FeatureSet_EnumType_field_name              protoreflect.Name = "enum_type"
-	FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
-	FeatureSet_Utf8Validation_field_name        protoreflect.Name = "utf8_validation"
-	FeatureSet_MessageEncoding_field_name       protoreflect.Name = "message_encoding"
-	FeatureSet_JsonFormat_field_name            protoreflect.Name = "json_format"
-
-	FeatureSet_FieldPresence_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
-	FeatureSet_EnumType_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
-	FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
-	FeatureSet_Utf8Validation_field_fullname        protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
-	FeatureSet_MessageEncoding_field_fullname       protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
-	FeatureSet_JsonFormat_field_fullname            protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+	FeatureSet_FieldPresence_field_name           protoreflect.Name = "field_presence"
+	FeatureSet_EnumType_field_name                protoreflect.Name = "enum_type"
+	FeatureSet_RepeatedFieldEncoding_field_name   protoreflect.Name = "repeated_field_encoding"
+	FeatureSet_Utf8Validation_field_name          protoreflect.Name = "utf8_validation"
+	FeatureSet_MessageEncoding_field_name         protoreflect.Name = "message_encoding"
+	FeatureSet_JsonFormat_field_name              protoreflect.Name = "json_format"
+	FeatureSet_EnforceNamingStyle_field_name      protoreflect.Name = "enforce_naming_style"
+	FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility"
+
+	FeatureSet_FieldPresence_field_fullname           protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
+	FeatureSet_EnumType_field_fullname                protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
+	FeatureSet_RepeatedFieldEncoding_field_fullname   protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
+	FeatureSet_Utf8Validation_field_fullname          protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
+	FeatureSet_MessageEncoding_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
+	FeatureSet_JsonFormat_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+	FeatureSet_EnforceNamingStyle_field_fullname      protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style"
+	FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility"
 )
 
 // Field numbers for google.protobuf.FeatureSet.
 const (
-	FeatureSet_FieldPresence_field_number         protoreflect.FieldNumber = 1
-	FeatureSet_EnumType_field_number              protoreflect.FieldNumber = 2
-	FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
-	FeatureSet_Utf8Validation_field_number        protoreflect.FieldNumber = 4
-	FeatureSet_MessageEncoding_field_number       protoreflect.FieldNumber = 5
-	FeatureSet_JsonFormat_field_number            protoreflect.FieldNumber = 6
+	FeatureSet_FieldPresence_field_number           protoreflect.FieldNumber = 1
+	FeatureSet_EnumType_field_number                protoreflect.FieldNumber = 2
+	FeatureSet_RepeatedFieldEncoding_field_number   protoreflect.FieldNumber = 3
+	FeatureSet_Utf8Validation_field_number          protoreflect.FieldNumber = 4
+	FeatureSet_MessageEncoding_field_number         protoreflect.FieldNumber = 5
+	FeatureSet_JsonFormat_field_number              protoreflect.FieldNumber = 6
+	FeatureSet_EnforceNamingStyle_field_number      protoreflect.FieldNumber = 7
+	FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8
 )
 
 // Full and short names for google.protobuf.FeatureSet.FieldPresence.
@@ -1112,6 +1140,40 @@ const (
 	FeatureSet_LEGACY_BEST_EFFORT_enum_value  = 2
 )
 
+// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+	FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle"
+	FeatureSet_EnforceNamingStyle_enum_name     = "EnforceNamingStyle"
+)
+
+// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+	FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0
+	FeatureSet_STYLE2024_enum_value                    = 1
+	FeatureSet_STYLE_LEGACY_enum_value                 = 2
+)
+
+// Names for google.protobuf.FeatureSet.VisibilityFeature.
+const (
+	FeatureSet_VisibilityFeature_message_name     protoreflect.Name     = "VisibilityFeature"
+	FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature"
+)
+
+// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility"
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name     = "DefaultSymbolVisibility"
+)
+
+// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+	FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0
+	FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value                        = 1
+	FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value                  = 2
+	FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value                         = 3
+	FeatureSet_VisibilityFeature_STRICT_enum_value                            = 4
+)
+
 // Names for google.protobuf.FeatureSetDefaults.
 const (
 	FeatureSetDefaults_message_name     protoreflect.Name     = "FeatureSetDefaults"
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
index 41c1f74ef8..bdad12a9bb 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -11,6 +11,7 @@ import (
 
 	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/encoding/messageset"
+	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/internal/order"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	piface "google.golang.org/protobuf/runtime/protoiface"
@@ -80,7 +81,7 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf
 		// permit us to skip over definitely-unset fields at marshal time.
 
 		var hasPresence bool
-		hasPresence, cf.isLazy = usePresenceForField(si, fd)
+		hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd)
 
 		if hasPresence {
 			cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
index dd55e8e009..5a439daacb 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -11,6 +11,7 @@ import (
 	"strings"
 	"sync/atomic"
 
+	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/reflect/protoreflect"
 )
 
@@ -53,7 +54,7 @@ func opaqueInitHook(mi *MessageInfo) bool {
 		fd := fds.Get(i)
 		fs := si.fieldsByNumber[fd.Number()]
 		var fi fieldInfo
-		usePresence, _ := usePresenceForField(si, fd)
+		usePresence, _ := filedesc.UsePresenceForField(fd)
 
 		switch {
 		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
@@ -343,17 +344,15 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
 			if p.IsNil() {
 				return false
 			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
 				return false
 			}
-			rv := sp.AsValueOf(fs.Type.Elem())
 			return rv.Elem().Len() > 0
 		},
 		clear: func(p pointer) {
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if !sp.IsNil() {
-				rv := sp.AsValueOf(fs.Type.Elem())
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if !rv.IsNil() {
 				rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
 			}
 		},
@@ -361,11 +360,10 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
 			if p.IsNil() {
 				return conv.Zero()
 			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
 				return conv.Zero()
 			}
-			rv := sp.AsValueOf(fs.Type.Elem())
 			if rv.Elem().Len() == 0 {
 				return conv.Zero()
 			}
@@ -598,30 +596,3 @@ func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
 func (mi *MessageInfo) present(p pointer, index uint32) bool {
 	return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
 }
-
-// usePresenceForField implements the somewhat intricate logic of when
-// the presence bitmap is used for a field.  The main logic is that a
-// field that is optional or that can be lazy will use the presence
-// bit, but for proto2, also maps have a presence bit. It also records
-// if the field can ever be lazy, which is true if we have a
-// lazyOffset and the field is a message or a slice of messages. A
-// field that is lazy will always need a presence bit.  Oneofs are not
-// lazy and do not use presence, unless they are a synthetic oneof,
-// which is a proto3 optional field. For proto3 optionals, we use the
-// presence and they can also be lazy when applicable (a message).
-func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
-	hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
-
-	// Non-oneof scalar fields with explicit field presence use the presence array.
-	usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
-	switch {
-	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
-		return false, false
-	case fd.IsMap():
-		return false, false
-	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
-		return hasLazyField, hasLazyField
-	default:
-		return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
-	}
-}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/presence.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/presence.go
index 914cb1deda..443afe81cd 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/presence.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/presence.go
@@ -32,9 +32,6 @@ func (p presence) toElem(num uint32) (ret *uint32) {
 
 // Present checks for the presence of a specific field number in a presence set.
 func (p presence) Present(num uint32) bool {
-	if p.P == nil {
-		return false
-	}
 	return Export{}.Present(p.toElem(num), num)
 }
 
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
similarity index 99%
rename from hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
rename to hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
index 1ffddf6877..42dd6f70c6 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build go1.21
-
 package strs
 
 import (
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
deleted file mode 100644
index 832a7988f1..0000000000
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.21
-
-package strs
-
-import (
-	"unsafe"
-
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-type (
-	stringHeader struct {
-		Data unsafe.Pointer
-		Len  int
-	}
-	sliceHeader struct {
-		Data unsafe.Pointer
-		Len  int
-		Cap  int
-	}
-)
-
-// UnsafeString returns an unsafe string reference of b.
-// The caller must treat the input slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user
-// unless the input slice is provably immutable.
-func UnsafeString(b []byte) (s string) {
-	src := (*sliceHeader)(unsafe.Pointer(&b))
-	dst := (*stringHeader)(unsafe.Pointer(&s))
-	dst.Data = src.Data
-	dst.Len = src.Len
-	return s
-}
-
-// UnsafeBytes returns an unsafe bytes slice reference of s.
-// The caller must treat returned slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user.
-func UnsafeBytes(s string) (b []byte) {
-	src := (*stringHeader)(unsafe.Pointer(&s))
-	dst := (*sliceHeader)(unsafe.Pointer(&b))
-	dst.Data = src.Data
-	dst.Len = src.Len
-	dst.Cap = src.Len
-	return b
-}
-
-// Builder builds a set of strings with shared lifetime.
-// This differs from strings.Builder, which is for building a single string.
-type Builder struct {
-	buf []byte
-}
-
-// AppendFullName is equivalent to protoreflect.FullName.Append,
-// but optimized for large batches where each name has a shared lifetime.
-func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
-	n := len(prefix) + len(".") + len(name)
-	if len(prefix) == 0 {
-		n -= len(".")
-	}
-	sb.grow(n)
-	sb.buf = append(sb.buf, prefix...)
-	sb.buf = append(sb.buf, '.')
-	sb.buf = append(sb.buf, name...)
-	return protoreflect.FullName(sb.last(n))
-}
-
-// MakeString is equivalent to string(b), but optimized for large batches
-// with a shared lifetime.
-func (sb *Builder) MakeString(b []byte) string {
-	sb.grow(len(b))
-	sb.buf = append(sb.buf, b...)
-	return sb.last(len(b))
-}
-
-func (sb *Builder) grow(n int) {
-	if cap(sb.buf)-len(sb.buf) >= n {
-		return
-	}
-
-	// Unlike strings.Builder, we do not need to copy over the contents
-	// of the old buffer since our builder provides no API for
-	// retrieving previously created strings.
-	sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
-}
-
-func (sb *Builder) last(n int) string {
-	return UnsafeString(sb.buf[len(sb.buf)-n:])
-}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/version/version.go b/hack/tools/vendor/google.golang.org/protobuf/internal/version/version.go
index 01efc33030..697d1c14f3 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
 const (
 	Major      = 1
 	Minor      = 36
-	Patch      = 5
+	Patch      = 8
 	PreRelease = ""
 )
 
diff --git a/hack/tools/vendor/google.golang.org/protobuf/proto/merge.go b/hack/tools/vendor/google.golang.org/protobuf/proto/merge.go
index 3c6fe57807..ef55b97dde 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/proto/merge.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/proto/merge.go
@@ -59,6 +59,12 @@ func Clone(m Message) Message {
 	return dst.Interface()
 }
 
+// CloneOf returns a deep copy of m. If the top-level message is invalid,
+// it returns an invalid message as well.
+func CloneOf[M Message](m M) M {
+	return Clone(m).(M)
+}
+
 // mergeOptions provides a namespace for merge functions, and can be
 // exported in the future if we add user-visible merge options.
 type mergeOptions struct{}
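
CloneOf is a thin generic wrapper over Clone that preserves the argument's static type, removing the usual type assertion at call sites. A short usage sketch; FileDescriptorProto is just a convenient concrete message type:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	src := &descriptorpb.FileDescriptorProto{Name: proto.String("a.proto")}

	// Clone returns proto.Message and needs an assertion back.
	c1 := proto.Clone(src).(*descriptorpb.FileDescriptorProto)

	// CloneOf keeps the static type, so the assertion disappears.
	c2 := proto.CloneOf(src)

	fmt.Println(c1.GetName(), c2.GetName()) // a.proto a.proto
}
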
diff --git a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index ea154eec44..730331e666 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -21,6 +21,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
 		b = p.appendRepeatedField(b, "public_dependency", nil)
 	case 11:
 		b = p.appendRepeatedField(b, "weak_dependency", nil)
+	case 15:
+		b = p.appendRepeatedField(b, "option_dependency", nil)
 	case 4:
 		b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto)
 	case 5:
@@ -66,6 +68,8 @@ func (p *SourcePath) appendDescriptorProto(b []byte) []byte {
 		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange)
 	case 10:
 		b = p.appendRepeatedField(b, "reserved_name", nil)
+	case 11:
+		b = p.appendSingularField(b, "visibility", nil)
 	}
 	return b
 }
@@ -85,6 +89,8 @@ func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte {
 		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange)
 	case 5:
 		b = p.appendRepeatedField(b, "reserved_name", nil)
+	case 6:
+		b = p.appendSingularField(b, "visibility", nil)
 	}
 	return b
 }
@@ -398,6 +404,10 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte {
 		b = p.appendSingularField(b, "message_encoding", nil)
 	case 6:
 		b = p.appendSingularField(b, "json_format", nil)
+	case 7:
+		b = p.appendSingularField(b, "enforce_naming_style", nil)
+	case 8:
+		b = p.appendSingularField(b, "default_symbol_visibility", nil)
 	}
 	return b
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
similarity index 99%
rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
rename to hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
index 479527b58d..fe17f37220 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build go1.21
-
 package protoreflect
 
 import (
diff --git a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
deleted file mode 100644
index 0015fcb35d..0000000000
--- a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.21
-
-package protoreflect
-
-import (
-	"unsafe"
-
-	"google.golang.org/protobuf/internal/pragma"
-)
-
-type (
-	stringHeader struct {
-		Data unsafe.Pointer
-		Len  int
-	}
-	sliceHeader struct {
-		Data unsafe.Pointer
-		Len  int
-		Cap  int
-	}
-	ifaceHeader struct {
-		Type unsafe.Pointer
-		Data unsafe.Pointer
-	}
-)
-
-var (
-	nilType     = typeOf(nil)
-	boolType    = typeOf(*new(bool))
-	int32Type   = typeOf(*new(int32))
-	int64Type   = typeOf(*new(int64))
-	uint32Type  = typeOf(*new(uint32))
-	uint64Type  = typeOf(*new(uint64))
-	float32Type = typeOf(*new(float32))
-	float64Type = typeOf(*new(float64))
-	stringType  = typeOf(*new(string))
-	bytesType   = typeOf(*new([]byte))
-	enumType    = typeOf(*new(EnumNumber))
-)
-
-// typeOf returns a pointer to the Go type information.
-// The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t any) unsafe.Pointer {
-	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
-}
-
-// value is a union where only one type can be represented at a time.
-// The struct is 24B large on 64-bit systems and requires the minimum storage
-// necessary to represent each possible type.
-//
-// The Go GC needs to be able to scan variables containing pointers.
-// As such, pointers and non-pointers cannot be intermixed.
-type value struct {
-	pragma.DoNotCompare // 0B
-
-	// typ stores the type of the value as a pointer to the Go type.
-	typ unsafe.Pointer // 8B
-
-	// ptr stores the data pointer for a String, Bytes, or interface value.
-	ptr unsafe.Pointer // 8B
-
-	// num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
-	// Enum value as a raw uint64.
-	//
-	// It is also used to store the length of a String or Bytes value;
-	// the capacity is ignored.
-	num uint64 // 8B
-}
-
-func valueOfString(v string) Value {
-	p := (*stringHeader)(unsafe.Pointer(&v))
-	return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfBytes(v []byte) Value {
-	p := (*sliceHeader)(unsafe.Pointer(&v))
-	return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfIface(v any) Value {
-	p := (*ifaceHeader)(unsafe.Pointer(&v))
-	return Value{typ: p.Type, ptr: p.Data}
-}
-
-func (v Value) getString() (x string) {
-	*(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)}
-	return x
-}
-func (v Value) getBytes() (x []byte) {
-	*(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
-	return x
-}
-func (v Value) getIface() (x any) {
-	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
-	return x
-}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index a516337674..4eacb523c3 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -151,6 +151,70 @@ func (Edition) EnumDescriptor() ([]byte, []int) {
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
 }
 
+// Describes the 'visibility' of a symbol with respect to the proto import
+// system. Symbols can only be imported when the visibility rules do not prevent
+// it (ex: local symbols cannot be imported). Visibility modifiers can only be
+// set on `message` and `enum`, as they are the only types available to be
+// referenced from other files.
+type SymbolVisibility int32
+
+const (
+	SymbolVisibility_VISIBILITY_UNSET  SymbolVisibility = 0
+	SymbolVisibility_VISIBILITY_LOCAL  SymbolVisibility = 1
+	SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2
+)
+
+// Enum value maps for SymbolVisibility.
+var (
+	SymbolVisibility_name = map[int32]string{
+		0: "VISIBILITY_UNSET",
+		1: "VISIBILITY_LOCAL",
+		2: "VISIBILITY_EXPORT",
+	}
+	SymbolVisibility_value = map[string]int32{
+		"VISIBILITY_UNSET":  0,
+		"VISIBILITY_LOCAL":  1,
+		"VISIBILITY_EXPORT": 2,
+	}
+)
+
+func (x SymbolVisibility) Enum() *SymbolVisibility {
+	p := new(SymbolVisibility)
+	*p = x
+	return p
+}
+
+func (x SymbolVisibility) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+}
+
+func (SymbolVisibility) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[1]
+}
+
+func (x SymbolVisibility) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *SymbolVisibility) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = SymbolVisibility(num)
+	return nil
+}
+
+// Deprecated: Use SymbolVisibility.Descriptor instead.
+func (SymbolVisibility) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1}
+}
+
 // The verification state of the extension range.
 type ExtensionRangeOptions_VerificationState int32
 
@@ -183,11 +247,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string {
 }
 
 func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
 }
 
 func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[1]
+	return &file_google_protobuf_descriptor_proto_enumTypes[2]
 }
 
 func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
@@ -299,11 +363,11 @@ func (x FieldDescriptorProto_Type) String() string {
 }
 
 func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
 }
 
 func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[2]
+	return &file_google_protobuf_descriptor_proto_enumTypes[3]
 }
 
 func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
@@ -362,11 +426,11 @@ func (x FieldDescriptorProto_Label) String() string {
 }
 
 func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
 }
 
 func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[3]
+	return &file_google_protobuf_descriptor_proto_enumTypes[4]
 }
 
 func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
@@ -423,11 +487,11 @@ func (x FileOptions_OptimizeMode) String() string {
 }
 
 func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
 }
 
 func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[4]
+	return &file_google_protobuf_descriptor_proto_enumTypes[5]
 }
 
 func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
@@ -489,11 +553,11 @@ func (x FieldOptions_CType) String() string {
 }
 
 func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
 }
 
 func (FieldOptions_CType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[5]
+	return &file_google_protobuf_descriptor_proto_enumTypes[6]
 }
 
 func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
@@ -551,11 +615,11 @@ func (x FieldOptions_JSType) String() string {
 }
 
 func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
 }
 
 func (FieldOptions_JSType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[6]
+	return &file_google_protobuf_descriptor_proto_enumTypes[7]
 }
 
 func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
@@ -611,11 +675,11 @@ func (x FieldOptions_OptionRetention) String() string {
 }
 
 func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
 }
 
 func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[7]
+	return &file_google_protobuf_descriptor_proto_enumTypes[8]
 }
 
 func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
@@ -694,11 +758,11 @@ func (x FieldOptions_OptionTargetType) String() string {
 }
 
 func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
 }
 
 func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[8]
+	return &file_google_protobuf_descriptor_proto_enumTypes[9]
 }
 
 func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
@@ -756,11 +820,11 @@ func (x MethodOptions_IdempotencyLevel) String() string {
 }
 
 func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
 }
 
 func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[9]
+	return &file_google_protobuf_descriptor_proto_enumTypes[10]
 }
 
 func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
@@ -818,11 +882,11 @@ func (x FeatureSet_FieldPresence) String() string {
 }
 
 func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
 }
 
 func (FeatureSet_FieldPresence) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[10]
+	return &file_google_protobuf_descriptor_proto_enumTypes[11]
 }
 
 func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber {
@@ -877,11 +941,11 @@ func (x FeatureSet_EnumType) String() string {
 }
 
 func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
 }
 
 func (FeatureSet_EnumType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[11]
+	return &file_google_protobuf_descriptor_proto_enumTypes[12]
 }
 
 func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber {
@@ -936,11 +1000,11 @@ func (x FeatureSet_RepeatedFieldEncoding) String() string {
 }
 
 func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
 }
 
 func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[12]
+	return &file_google_protobuf_descriptor_proto_enumTypes[13]
 }
 
 func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber {
@@ -995,11 +1059,11 @@ func (x FeatureSet_Utf8Validation) String() string {
 }
 
 func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
 }
 
 func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[13]
+	return &file_google_protobuf_descriptor_proto_enumTypes[14]
 }
 
 func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber {
@@ -1054,11 +1118,11 @@ func (x FeatureSet_MessageEncoding) String() string {
 }
 
 func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
 }
 
 func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[14]
+	return &file_google_protobuf_descriptor_proto_enumTypes[15]
 }
 
 func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber {
@@ -1113,11 +1177,11 @@ func (x FeatureSet_JsonFormat) String() string {
 }
 
 func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
 }
 
 func (FeatureSet_JsonFormat) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[15]
+	return &file_google_protobuf_descriptor_proto_enumTypes[16]
 }
 
 func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber {
@@ -1139,6 +1203,136 @@ func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) {
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5}
 }
 
+type FeatureSet_EnforceNamingStyle int32
+
+const (
+	FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0
+	FeatureSet_STYLE2024                    FeatureSet_EnforceNamingStyle = 1
+	FeatureSet_STYLE_LEGACY                 FeatureSet_EnforceNamingStyle = 2
+)
+
+// Enum value maps for FeatureSet_EnforceNamingStyle.
+var (
+	FeatureSet_EnforceNamingStyle_name = map[int32]string{
+		0: "ENFORCE_NAMING_STYLE_UNKNOWN",
+		1: "STYLE2024",
+		2: "STYLE_LEGACY",
+	}
+	FeatureSet_EnforceNamingStyle_value = map[string]int32{
+		"ENFORCE_NAMING_STYLE_UNKNOWN": 0,
+		"STYLE2024":                    1,
+		"STYLE_LEGACY":                 2,
+	}
+)
+
+func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle {
+	p := new(FeatureSet_EnforceNamingStyle)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_EnforceNamingStyle) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor()
+}
+
+func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[17]
+}
+
+func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_EnforceNamingStyle(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead.
+func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6}
+}
+
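A minimal, self-contained sketch of how the EnforceNamingStyle helpers generated above are typically used. It assumes this file is consumed through the canonical import path google.golang.org/protobuf/types/descriptorpb (the vendored path in this repo may differ) and is illustrative only, not part of the generated output:

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/types/descriptorpb"
	)

	func main() {
		// Enum() heap-allocates a copy and returns a pointer, the shape
		// expected by optional descriptor.proto fields.
		style := descriptorpb.FeatureSet_STYLE2024.Enum()
		fmt.Println(style.String()) // STYLE2024

		// The generated value maps support number<->name lookups.
		fmt.Println(descriptorpb.FeatureSet_EnforceNamingStyle_name[int32(*style)])
	}
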
+type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32
+
+const (
+	FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0
+	// Default before EDITION_2024: all symbols with UNSET visibility are exported.
+	FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1
+	// All top-level symbols default to export, nested default to local.
+	FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2
+	// All symbols default to local.
+	FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3
+	// All symbols are local by default; nested types cannot be exported,
+	// with a special-case caveat for message { enum {} reserved 1 to max; }.
+	// This is the recommended setting for new protos.
+	FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4
+)
+
+// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility.
+var (
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{
+		0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN",
+		1: "EXPORT_ALL",
+		2: "EXPORT_TOP_LEVEL",
+		3: "LOCAL_ALL",
+		4: "STRICT",
+	}
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{
+		"DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0,
+		"EXPORT_ALL":                        1,
+		"EXPORT_TOP_LEVEL":                  2,
+		"LOCAL_ALL":                         3,
+		"STRICT":                            4,
+	}
+)
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+	p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor()
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[18]
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead.
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0}
+}
+
 // Represents the identified object's effect on the element in the original
 // .proto file.
 type GeneratedCodeInfo_Annotation_Semantic int32
@@ -1177,11 +1371,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string {
 }
 
 func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor()
 }
 
 func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[16]
+	return &file_google_protobuf_descriptor_proto_enumTypes[19]
 }
 
 func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
@@ -1262,6 +1456,9 @@ type FileDescriptorProto struct {
 	// Indexes of the weak imported files in the dependency list.
 	// For Google-internal migration only. Do not use.
 	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+	// Names of files imported by this file purely for the purpose of providing
+	// option extensions. These are excluded from the dependency list above.
+	OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"`
 	// All top-level definitions in this file.
 	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
 	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
@@ -1277,8 +1474,14 @@ type FileDescriptorProto struct {
 	// The supported values are "proto2", "proto3", and "editions".
 	//
 	// If `edition` is present, this value must be "editions".
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
 	// The edition of the proto file.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Edition       *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache     protoimpl.SizeCache
@@ -1349,6 +1552,13 @@ func (x *FileDescriptorProto) GetWeakDependency() []int32 {
 	return nil
 }
 
+func (x *FileDescriptorProto) GetOptionDependency() []string {
+	if x != nil {
+		return x.OptionDependency
+	}
+	return nil
+}
+
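A short sketch (same assumed import path as the sketch above) of reading the new option_dependency field; the example file name is hypothetical, and generated getters are safe on nil receivers:

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/types/descriptorpb"
	)

	func main() {
		fd := &descriptorpb.FileDescriptorProto{
			// Hypothetical example value: an import used only for option extensions.
			OptionDependency: []string{"google/protobuf/cpp_features.proto"},
		}
		for _, dep := range fd.GetOptionDependency() {
			fmt.Println(dep)
		}

		// Nil receivers return the zero value rather than panicking.
		var nilFD *descriptorpb.FileDescriptorProto
		fmt.Println(len(nilFD.GetOptionDependency())) // 0
	}
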
 func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto {
 	if x != nil {
 		return x.MessageType
@@ -1419,7 +1629,9 @@ type DescriptorProto struct {
 	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved field names, which may not be used by fields in the same message.
 	// A given name may only be reserved once.
-	ReservedName  []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	// Support for `export` and `local` keywords on messages.
+	Visibility    *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache     protoimpl.SizeCache
 }
@@ -1524,6 +1736,13 @@ func (x *DescriptorProto) GetReservedName() []string {
 	return nil
 }
 
+func (x *DescriptorProto) GetVisibility() SymbolVisibility {
+	if x != nil && x.Visibility != nil {
+		return *x.Visibility
+	}
+	return SymbolVisibility_VISIBILITY_UNSET
+}
+
 type ExtensionRangeOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The parser stores options it doesn't recognize here. See above.
@@ -1836,7 +2055,9 @@ type EnumDescriptorProto struct {
 	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved enum value names, which may not be reused. A given name may only
 	// be reserved once.
-	ReservedName  []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	// Support for `export` and `local` keywords on enums.
+	Visibility    *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache     protoimpl.SizeCache
 }
@@ -1906,6 +2127,13 @@ func (x *EnumDescriptorProto) GetReservedName() []string {
 	return nil
 }
 
+func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility {
+	if x != nil && x.Visibility != nil {
+		return *x.Visibility
+	}
+	return SymbolVisibility_VISIBILITY_UNSET
+}
+
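A sketch of the defaulting behavior of the new GetVisibility accessors: an unset field (or nil receiver) yields VISIBILITY_UNSET. The VISIBILITY_EXPORT value name is assumed from the SymbolVisibility enum defined elsewhere in this file:

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/types/descriptorpb"
	)

	func main() {
		enum := &descriptorpb.EnumDescriptorProto{}
		fmt.Println(enum.GetVisibility()) // VISIBILITY_UNSET

		// Assumed value name; set the optional field via a pointer.
		vis := descriptorpb.SymbolVisibility_VISIBILITY_EXPORT
		enum.Visibility = &vis
		fmt.Println(enum.GetVisibility()) // VISIBILITY_EXPORT
	}
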
 // Describes a value within an enum.
 type EnumValueDescriptorProto struct {
 	state         protoimpl.MessageState `protogen:"open.v1"`
@@ -2212,6 +2440,9 @@ type FileOptions struct {
 	// determining the ruby package.
 	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here.
 	// See the documentation for the "Options" section above.
@@ -2482,6 +2713,9 @@ type MessageOptions struct {
 	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2639,7 +2873,10 @@ type FieldOptions struct {
 	// for accessors, or it will be completely ignored; in the very least, this
 	// is a formalization for deprecating fields.
 	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// DEPRECATED. DO NOT USE!
 	// For Google-internal migration only. Do not use.
+	//
+	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
 	// Indicate that the field value should not be printed out when using debug
 	// formats, e.g. when the field contains sensitive credentials.
@@ -2648,6 +2885,9 @@ type FieldOptions struct {
 	Targets         []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
 	EditionDefaults []*FieldOptions_EditionDefault  `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features       *FeatureSet                  `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
 	FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
@@ -2740,6 +2980,7 @@ func (x *FieldOptions) GetDeprecated() bool {
 	return Default_FieldOptions_Deprecated
 }
 
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 func (x *FieldOptions) GetWeak() bool {
 	if x != nil && x.Weak != nil {
 		return *x.Weak
@@ -2799,6 +3040,9 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
 type OneofOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2871,6 +3115,9 @@ type EnumOptions struct {
 	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2958,6 +3205,9 @@ type EnumValueOptions struct {
 	// this is a formalization for deprecating enum values.
 	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
 	// Indicate that fields annotated with this enum value should not be printed
 	// out when using debug formats, e.g. when the field contains sensitive
@@ -3046,6 +3296,9 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
 type ServiceOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
 	// Is this service deprecated?
 	// Depending on the target platform, this can emit Deprecated annotations
@@ -3124,6 +3377,9 @@ type MethodOptions struct {
 	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -3303,16 +3559,18 @@ func (x *UninterpretedOption) GetAggregateValue() string {
 // be designed and implemented to handle this, hopefully before we ever hit a
 // conflict here.
 type FeatureSet struct {
-	state                 protoimpl.MessageState            `protogen:"open.v1"`
-	FieldPresence         *FeatureSet_FieldPresence         `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
-	EnumType              *FeatureSet_EnumType              `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
-	RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
-	Utf8Validation        *FeatureSet_Utf8Validation        `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
-	MessageEncoding       *FeatureSet_MessageEncoding       `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
-	JsonFormat            *FeatureSet_JsonFormat            `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
-	extensionFields       protoimpl.ExtensionFields
-	unknownFields         protoimpl.UnknownFields
-	sizeCache             protoimpl.SizeCache
+	state                   protoimpl.MessageState                                `protogen:"open.v1"`
+	FieldPresence           *FeatureSet_FieldPresence                             `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
+	EnumType                *FeatureSet_EnumType                                  `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
+	RepeatedFieldEncoding   *FeatureSet_RepeatedFieldEncoding                     `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
+	Utf8Validation          *FeatureSet_Utf8Validation                            `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
+	MessageEncoding         *FeatureSet_MessageEncoding                           `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
+	JsonFormat              *FeatureSet_JsonFormat                                `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+	EnforceNamingStyle      *FeatureSet_EnforceNamingStyle                        `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
+	DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"`
+	extensionFields         protoimpl.ExtensionFields
+	unknownFields           protoimpl.UnknownFields
+	sizeCache               protoimpl.SizeCache
 }
 
 func (x *FeatureSet) Reset() {
@@ -3387,6 +3645,20 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
 	return FeatureSet_JSON_FORMAT_UNKNOWN
 }
 
+func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle {
+	if x != nil && x.EnforceNamingStyle != nil {
+		return *x.EnforceNamingStyle
+	}
+	return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN
+}
+
+func (x *FeatureSet) GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+	if x != nil && x.DefaultSymbolVisibility != nil {
+		return *x.DefaultSymbolVisibility
+	}
+	return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN
+}
+
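A sketch exercising the two FeatureSet fields added above (same assumed import path as earlier); the getters fall back to the *_UNKNOWN values when a field is unset:

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/types/descriptorpb"
	)

	func main() {
		fs := &descriptorpb.FeatureSet{
			EnforceNamingStyle:      descriptorpb.FeatureSet_STYLE2024.Enum(),
			DefaultSymbolVisibility: descriptorpb.FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL.Enum(),
		}
		fmt.Println(fs.GetEnforceNamingStyle())      // STYLE2024
		fmt.Println(fs.GetDefaultSymbolVisibility()) // EXPORT_TOP_LEVEL

		// Unset fields report the generated UNKNOWN sentinel.
		var unset descriptorpb.FeatureSet
		fmt.Println(unset.GetEnforceNamingStyle()) // ENFORCE_NAMING_STYLE_UNKNOWN
	}
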
 // A compiled specification for the defaults of a set of features.  These
 // messages are generated from FeatureSet extensions and can be used to seed
 // feature resolution. The resolution with this object becomes a simple search
@@ -4047,6 +4319,42 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
 	return false
 }
 
+type FeatureSet_VisibilityFeature struct {
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *FeatureSet_VisibilityFeature) Reset() {
+	*x = FeatureSet_VisibilityFeature{}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *FeatureSet_VisibilityFeature) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSet_VisibilityFeature) ProtoMessage() {}
+
+func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead.
+func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+}
+
 // A map from every known edition with a unique set of defaults to its
 // defaults. Not all editions may be contained here.  For a given edition,
 // the defaults at the closest matching edition ordered at or before it should
@@ -4064,7 +4372,7 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct {
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
 	*x = FeatureSetDefaults_FeatureSetEditionDefault{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4076,7 +4384,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
 func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4212,7 +4520,7 @@ type SourceCodeInfo_Location struct {
 
 func (x *SourceCodeInfo_Location) Reset() {
 	*x = SourceCodeInfo_Location{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4224,7 +4532,7 @@ func (x *SourceCodeInfo_Location) String() string {
 func (*SourceCodeInfo_Location) ProtoMessage() {}
 
 func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4296,7 +4604,7 @@ type GeneratedCodeInfo_Annotation struct {
 
 func (x *GeneratedCodeInfo_Annotation) Reset() {
 	*x = GeneratedCodeInfo_Annotation{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4308,7 +4616,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string {
 func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4361,777 +4669,389 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio
 
 var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{
-	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
-	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
-	0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01,
-	0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07,
-	0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70,
-	0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64,
-	0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65,
-	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
-	0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28,
-	0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65,
-	0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65,
-	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65,
-	0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c,
-	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70,
-	0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d,
-	0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
-	0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44,
-	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07,
-	0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e,
-	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63,
-	0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52,
-	0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
-	0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69,
-	0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
-	0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f,
-	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
-	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64,
-	0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
-	0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65,
-	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f,
-	0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65,
-	0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d,
-	0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e,
-	0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
-	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
-	0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
-	0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64,
-	0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
-	0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
-	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
-	0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d,
-	0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a,
-	0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a,
-	0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61,
-	0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52,
-	0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
-	0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37,
-	0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
-	0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
-	0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65,
-	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64,
-	0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67,
-	0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61,
-	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
-	0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
-	0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
-	0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65,
-	0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a,
-	0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02,
-	0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94,
-	0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16,
-	0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06,
-	0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e,
-	0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e,
-	0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
-	0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
-	0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18,
-	0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a,
-	0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45,
-	0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55,
-	0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07,
-	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20,
-	0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c,
-	0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e,
-	0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
-	0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b,
-	0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75,
-	0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
-	0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
-	0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28,
-	0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a,
-	0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
-	0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a,
-	0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f,
-	0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
-	0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49,
-	0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
-	0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
-	0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
-	0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a,
-	0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a,
-	0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d,
-	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a,
-	0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f,
-	0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36,
-	0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54,
-	0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e,
-	0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12,
-	0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c,
-	0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45,
-	0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f,
-	0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e,
-	0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
-	0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
-	0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
-	0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
-	0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
-	0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52,
-	0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61,
-	0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f,
-	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65,
-	0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d,
-	0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
-	0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74,
-	0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
-	0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
-	0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b,
-	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16,
-	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
-	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65,
-	0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74,
-	0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
-	0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
-	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79,
-	0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
-	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54,
-	0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a,
-	0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
-	0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f,
-	0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12,
-	0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
-	0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
-	0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
-	0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67,
-	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63,
-	0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74,
-	0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01,
-	0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61,
-	0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d,
-	0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20,
-	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61,
-	0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a,
-	0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65,
-	0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14,
-	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65,
-	0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48,
-	0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69,
-	0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20,
-	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61,
-	0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12,
-	0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18,
-	0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65,
-	0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a,
-	0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
-	0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b,
-	0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69,
-	0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72,
-	0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61,
-	0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
-	0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
-	0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72,
-	0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65,
-	0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01,
-	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e,
-	0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a,
-	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
-	0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
-	0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74,
-	0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65,
-	0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73,
-	0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
-	0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
-	0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
-	0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72,
-	0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77,
-	0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a,
-	0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69,
-	0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73,
-	0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e,
-	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
-	0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16,
-	0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d,
-	0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68,
-	0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
-	0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
-	0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61,
-	0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58,
-	0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69,
-	0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45,
-	0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45,
-	0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49,
-	0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a,
-	0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70,
-	0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
-	0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f,
-	0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65,
-	0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d,
-	0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72,
-	0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63,
-	0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
-	0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65,
-	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72,
-	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65,
-	0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45,
-	0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
-	0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
-	0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b,
-	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
-	0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69,
-	0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08,
-	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
-	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
-	0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a,
-	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05,
-	0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08,
-	0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79,
-	0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53,
-	0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06,
-	0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61,
-	0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06,
-	0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
-	0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a,
-	0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
-	0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65,
-	0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28,
-	0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69,
-	0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
-	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
-	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
-	0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
-	0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65,
-	0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65,
-	0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
-	0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74,
-	0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
-	0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03,
-	0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79,
-	0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65,
-	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18,
-	0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a,
-	0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
-	0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
-	0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
-	0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
-	0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
-	0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a,
-	0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
-	0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69,
-	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a,
-	0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75,
-	0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72,
-	0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f,
-	0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
-	0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77,
-	0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65,
-	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67,
-	0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f,
-	0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f,
-	0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06,
-	0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44,
-	0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45,
-	0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d,
-	0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a,
-	0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
-	0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15,
-	0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
-	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49,
-	0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10,
-	0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45,
-	0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72,
-	0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
-	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
-	0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
-	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f,
-	0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
-	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03,
-	0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45,
-	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14,
-	0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e,
-	0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07,
-	0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52,
-	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10,
-	0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04,
-	0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65,
-	0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
-	0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28,
-	0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
-	0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
-	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8,
-	0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
-	0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c,
-	0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
-	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
-	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
-	0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65,
-	0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
-	0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42,
-	0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c,
-	0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f,
-	0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
-	0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
-	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
-	0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10,
-	0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74,
-	0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64,
-	0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20,
-	0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
-	0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
-	0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07,
-	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69,
-	0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
-	0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
-	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
-	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
-	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99,
-	0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70,
-	0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65,
-	0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
-	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f,
-	0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
-	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a,
-	0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65,
-	0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
-	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f,
-	0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12,
-	0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a,
-	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55,
-	0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52,
-	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
-	0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65,
-	0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74,
-	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f,
-	0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c,
-	0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61,
-	0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c,
-	0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01,
-	0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
-	0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
-	0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c,
-	0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f,
-	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67,
-	0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e,
-	0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f,
-	0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65,
-	0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e,
-	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78,
-	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64,
-	0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
-	0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98,
-	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
-	0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43,
-	0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
-	0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65,
-	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e,
-	0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54,
-	0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01,
-	0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12,
-	0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08,
-	0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70,
-	0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f,
-	0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64,
-	0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88,
-	0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
-	0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43,
-	0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65,
-	0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64,
-	0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69,
-	0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61,
-	0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04,
-	0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2,
-	0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03,
-	0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65,
-	0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
-	0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98,
-	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48,
-	0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08,
-	0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64,
-	0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72,
-	0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61,
-	0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2,
-	0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f,
-	0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c,
-	0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73,
-	0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c,
-	0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45,
-	0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
-	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49,
-	0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
-	0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55,
-	0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79,
-	0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45,
-	0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22,
-	0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45,
-	0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
-	0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a,
-	0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50,
-	0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56,
-	0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46,
-	0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b,
-	0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59,
-	0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01,
-	0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
-	0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
-	0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
-	0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52,
-	0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49,
-	0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46,
-	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f,
-	0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09,
-	0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47,
-	0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10,
-	0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90,
-	0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8,
-	0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
-	0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f,
-	0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
-	0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
-	0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75,
-	0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
-	0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
-	0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
-	0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65,
-	0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c,
-	0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78,
-	0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d,
-	0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08,
-	0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f,
-	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
-	0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a,
-	0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74,
-	0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74,
-	0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42,
-	0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61,
-	0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d,
-	0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
-	0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74,
-	0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74,
-	0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06,
-	0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74,
-	0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08,
-	0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11,
-	0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
-	0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
-	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
-	0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-	0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
-	0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10,
-	0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63,
-	0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f,
-	0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69,
-	0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10,
-	0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64,
-	0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
-	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
-	0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61,
-	0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63,
-	0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45,
-	0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7,
-	0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44,
-	0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
-	0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43,
-	0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49,
-	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11,
-	0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8,
-	0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32,
-	0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a,
-	0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f,
-	0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f,
-	0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
-	0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
-	0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59,
-	0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
-	0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d,
-	0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42,
-	0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
-	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
-	0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
-	0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65,
-	0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
-})
+const file_google_protobuf_descriptor_proto_rawDesc = "" +
+	"\n" +
+	" google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" +
+	"\x11FileDescriptorSet\x128\n" +
+	"\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" +
+	"\x13FileDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
+	"\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" +
+	"\n" +
+	"dependency\x18\x03 \x03(\tR\n" +
+	"dependency\x12+\n" +
+	"\x11public_dependency\x18\n" +
+	" \x03(\x05R\x10publicDependency\x12'\n" +
+	"\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" +
+	"\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" +
+	"\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" +
+	"\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" +
+	"\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" +
+	"\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" +
+	"\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" +
+	"\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" +
+	"\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" +
+	"\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" +
+	"\x0fDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12;\n" +
+	"\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" +
+	"\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" +
+	"\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" +
+	"nestedType\x12A\n" +
+	"\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" +
+	"\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" +
+	"\n" +
+	"oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" +
+	"\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" +
+	"\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" +
+	"\rreserved_name\x18\n" +
+	" \x03(\tR\freservedName\x12A\n" +
+	"\n" +
+	"visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+	"visibility\x1az\n" +
+	"\x0eExtensionRange\x12\x14\n" +
+	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+	"\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" +
+	"\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" +
+	"\rReservedRange\x12\x14\n" +
+	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+	"\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" +
+	"\x15ExtensionRangeOptions\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" +
+	"\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" +
+	"\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" +
+	"\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" +
+	"UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" +
+	"\vDeclaration\x12\x16\n" +
+	"\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" +
+	"\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" +
+	"\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" +
+	"\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" +
+	"\brepeated\x18\x06 \x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" +
+	"\x11VerificationState\x12\x0f\n" +
+	"\vDECLARATION\x10\x00\x12\x0e\n" +
+	"\n" +
+	"UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" +
+	"\x14FieldDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+	"\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" +
+	"\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" +
+	"\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" +
+	"\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" +
+	"\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" +
+	"\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" +
+	"\voneof_index\x18\t \x01(\x05R\n" +
+	"oneofIndex\x12\x1b\n" +
+	"\tjson_name\x18\n" +
+	" \x01(\tR\bjsonName\x127\n" +
+	"\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" +
+	"\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" +
+	"\x04Type\x12\x0f\n" +
+	"\vTYPE_DOUBLE\x10\x01\x12\x0e\n" +
+	"\n" +
+	"TYPE_FLOAT\x10\x02\x12\x0e\n" +
+	"\n" +
+	"TYPE_INT64\x10\x03\x12\x0f\n" +
+	"\vTYPE_UINT64\x10\x04\x12\x0e\n" +
+	"\n" +
+	"TYPE_INT32\x10\x05\x12\x10\n" +
+	"\fTYPE_FIXED64\x10\x06\x12\x10\n" +
+	"\fTYPE_FIXED32\x10\a\x12\r\n" +
+	"\tTYPE_BOOL\x10\b\x12\x0f\n" +
+	"\vTYPE_STRING\x10\t\x12\x0e\n" +
+	"\n" +
+	"TYPE_GROUP\x10\n" +
+	"\x12\x10\n" +
+	"\fTYPE_MESSAGE\x10\v\x12\x0e\n" +
+	"\n" +
+	"TYPE_BYTES\x10\f\x12\x0f\n" +
+	"\vTYPE_UINT32\x10\r\x12\r\n" +
+	"\tTYPE_ENUM\x10\x0e\x12\x11\n" +
+	"\rTYPE_SFIXED32\x10\x0f\x12\x11\n" +
+	"\rTYPE_SFIXED64\x10\x10\x12\x0f\n" +
+	"\vTYPE_SINT32\x10\x11\x12\x0f\n" +
+	"\vTYPE_SINT64\x10\x12\"C\n" +
+	"\x05Label\x12\x12\n" +
+	"\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" +
+	"\x0eLABEL_REPEATED\x10\x03\x12\x12\n" +
+	"\x0eLABEL_REQUIRED\x10\x02\"c\n" +
+	"\x14OneofDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x127\n" +
+	"\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" +
+	"\x13EnumDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12?\n" +
+	"\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" +
+	"\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" +
+	"\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" +
+	"\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" +
+	"\n" +
+	"visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+	"visibility\x1a;\n" +
+	"\x11EnumReservedRange\x12\x14\n" +
+	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+	"\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" +
+	"\x18EnumValueDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+	"\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" +
+	"\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" +
+	"\x16ServiceDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12>\n" +
+	"\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" +
+	"\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" +
+	"\x15MethodDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" +
+	"\n" +
+	"input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" +
+	"\voutput_type\x18\x03 \x01(\tR\n" +
+	"outputType\x128\n" +
+	"\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" +
+	"\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" +
+	"\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" +
+	"\vFileOptions\x12!\n" +
+	"\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" +
+	"\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" +
+	"\x13java_multiple_files\x18\n" +
+	" \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" +
+	"\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" +
+	"\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" +
+	"\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" +
+	"\n" +
+	"go_package\x18\v \x01(\tR\tgoPackage\x125\n" +
+	"\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" +
+	"\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" +
+	"\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" +
+	"\n" +
+	"deprecated\x18\x17 \x01(\b:\x05falseR\n" +
+	"deprecated\x12.\n" +
+	"\x10cc_enable_arenas\x18\x1f \x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" +
+	"\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" +
+	"\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" +
+	"\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" +
+	"\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" +
+	"\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" +
+	"\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" +
+	"\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" +
+	"\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" +
+	"\fOptimizeMode\x12\t\n" +
+	"\x05SPEED\x10\x01\x12\r\n" +
+	"\tCODE_SIZE\x10\x02\x12\x10\n" +
+	"\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" +
+	"\x0eMessageOptions\x12<\n" +
+	"\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" +
+	"\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" +
+	"\n" +
+	"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+	"deprecated\x12\x1b\n" +
+	"\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" +
+	"&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
+	"\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" +
+	"\"\xa1\r\n" +
+	"\fFieldOptions\x12A\n" +
+	"\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" +
+	"\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" +
+	"\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" +
+	"\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" +
+	"\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" +
+	"\n" +
+	"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+	"deprecated\x12\x1d\n" +
+	"\x04weak\x18\n" +
+	" \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" +
+	"\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" +
+	"\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" +
+	"\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" +
+	"\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" +
+	"\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" +
+	"\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" +
+	"\x0eEditionDefault\x122\n" +
+	"\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" +
+	"\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" +
+	"\x0eFeatureSupport\x12G\n" +
+	"\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" +
+	"\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" +
+	"\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" +
+	"\x0fedition_removed\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" +
+	"\x05CType\x12\n" +
+	"\n" +
+	"\x06STRING\x10\x00\x12\b\n" +
+	"\x04CORD\x10\x01\x12\x10\n" +
+	"\fSTRING_PIECE\x10\x02\"5\n" +
+	"\x06JSType\x12\r\n" +
+	"\tJS_NORMAL\x10\x00\x12\r\n" +
+	"\tJS_STRING\x10\x01\x12\r\n" +
+	"\tJS_NUMBER\x10\x02\"U\n" +
+	"\x0fOptionRetention\x12\x15\n" +
+	"\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" +
+	"\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" +
+	"\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" +
+	"\x10OptionTargetType\x12\x17\n" +
+	"\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" +
+	"\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" +
+	"\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" +
+	"\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" +
+	"\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" +
+	"\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" +
+	"\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" +
+	"\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" +
+	"\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" +
+	"\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" +
+	"\fOneofOptions\x127\n" +
+	"\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" +
+	"\vEnumOptions\x12\x1f\n" +
+	"\vallow_alias\x18\x02 \x01(\bR\n" +
+	"allowAlias\x12%\n" +
+	"\n" +
+	"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+	"deprecated\x12V\n" +
+	"&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
+	"\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" +
+	"\x10EnumValueOptions\x12%\n" +
+	"\n" +
+	"deprecated\x18\x01 \x01(\b:\x05falseR\n" +
+	"deprecated\x127\n" +
+	"\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" +
+	"\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" +
+	"\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" +
+	"\x0eServiceOptions\x127\n" +
+	"\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" +
+	"\n" +
+	"deprecated\x18! \x01(\b:\x05falseR\n" +
+	"deprecated\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" +
+	"\rMethodOptions\x12%\n" +
+	"\n" +
+	"deprecated\x18! \x01(\b:\x05falseR\n" +
+	"deprecated\x12q\n" +
+	"\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" +
+	"\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" +
+	"\x10IdempotencyLevel\x12\x17\n" +
+	"\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" +
+	"\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" +
+	"\n" +
+	"IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" +
+	"\x13UninterpretedOption\x12A\n" +
+	"\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" +
+	"\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" +
+	"\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" +
+	"\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" +
+	"\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" +
+	"\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" +
+	"\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" +
+	"\bNamePart\x12\x1b\n" +
+	"\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" +
+	"\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" +
+	"\n" +
+	"FeatureSet\x12\x91\x01\n" +
+	"\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" +
+	"\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" +
+	"\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" +
+	"\x0futf8_validation\x18\x04 \x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" +
+	"\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" +
+	"\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" +
+	"\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" +
+	"jsonFormat\x12\xab\x01\n" +
+	"\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" +
+	"\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" +
+	"EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" +
+	"\x11VisibilityFeature\"\x81\x01\n" +
+	"\x17DefaultSymbolVisibility\x12%\n" +
+	"!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" +
+	"\n" +
+	"EXPORT_ALL\x10\x01\x12\x14\n" +
+	"\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" +
+	"\tLOCAL_ALL\x10\x03\x12\n" +
+	"\n" +
+	"\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" +
+	"\rFieldPresence\x12\x1a\n" +
+	"\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" +
+	"\bEXPLICIT\x10\x01\x12\f\n" +
+	"\bIMPLICIT\x10\x02\x12\x13\n" +
+	"\x0fLEGACY_REQUIRED\x10\x03\"7\n" +
+	"\bEnumType\x12\x15\n" +
+	"\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" +
+	"\x04OPEN\x10\x01\x12\n" +
+	"\n" +
+	"\x06CLOSED\x10\x02\"V\n" +
+	"\x15RepeatedFieldEncoding\x12#\n" +
+	"\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" +
+	"\n" +
+	"\x06PACKED\x10\x01\x12\f\n" +
+	"\bEXPANDED\x10\x02\"I\n" +
+	"\x0eUtf8Validation\x12\x1b\n" +
+	"\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" +
+	"\n" +
+	"\x06VERIFY\x10\x02\x12\b\n" +
+	"\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" +
+	"\x0fMessageEncoding\x12\x1c\n" +
+	"\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" +
+	"\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" +
+	"\tDELIMITED\x10\x02\"H\n" +
+	"\n" +
+	"JsonFormat\x12\x17\n" +
+	"\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" +
+	"\x05ALLOW\x10\x01\x12\x16\n" +
+	"\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" +
+	"\x12EnforceNamingStyle\x12 \n" +
+	"\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" +
+	"\tSTYLE2024\x10\x01\x12\x10\n" +
+	"\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" +
+	"\x12FeatureSetDefaults\x12X\n" +
+	"\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" +
+	"\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" +
+	"\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" +
+	"\x18FeatureSetEditionDefault\x122\n" +
+	"\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" +
+	"\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" +
+	"\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" +
+	"\x0eSourceCodeInfo\x12D\n" +
+	"\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" +
+	"\bLocation\x12\x16\n" +
+	"\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" +
+	"\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" +
+	"\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" +
+	"\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" +
+	"\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" +
+	"\x11GeneratedCodeInfo\x12M\n" +
+	"\n" +
+	"annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" +
+	"annotation\x1a\xeb\x01\n" +
+	"\n" +
+	"Annotation\x12\x16\n" +
+	"\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" +
+	"\vsource_file\x18\x02 \x01(\tR\n" +
+	"sourceFile\x12\x14\n" +
+	"\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" +
+	"\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" +
+	"\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" +
+	"\bSemantic\x12\b\n" +
+	"\x04NONE\x10\x00\x12\a\n" +
+	"\x03SET\x10\x01\x12\t\n" +
+	"\x05ALIAS\x10\x02*\xa7\x02\n" +
+	"\aEdition\x12\x13\n" +
+	"\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" +
+	"\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" +
+	"\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" +
+	"\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" +
+	"\fEDITION_2023\x10\xe8\a\x12\x11\n" +
+	"\fEDITION_2024\x10\xe9\a\x12\x17\n" +
+	"\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" +
+	"\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" +
+	"\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" +
+	"\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" +
+	"\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" +
+	"\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" +
+	"\x10SymbolVisibility\x12\x14\n" +
+	"\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" +
+	"\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" +
+	"\x11VISIBILITY_EXPORT\x10\x02B~\n" +
+	"\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection"
 
 var (
 	file_google_protobuf_descriptor_proto_rawDescOnce sync.Once
@@ -5145,143 +5065,151 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
 	return file_google_protobuf_descriptor_proto_rawDescData
 }
 
-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
-var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33)
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20)
+var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34)
 var file_google_protobuf_descriptor_proto_goTypes = []any{
-	(Edition)(0), // 0: google.protobuf.Edition
-	(ExtensionRangeOptions_VerificationState)(0),        // 1: google.protobuf.ExtensionRangeOptions.VerificationState
-	(FieldDescriptorProto_Type)(0),                      // 2: google.protobuf.FieldDescriptorProto.Type
-	(FieldDescriptorProto_Label)(0),                     // 3: google.protobuf.FieldDescriptorProto.Label
-	(FileOptions_OptimizeMode)(0),                       // 4: google.protobuf.FileOptions.OptimizeMode
-	(FieldOptions_CType)(0),                             // 5: google.protobuf.FieldOptions.CType
-	(FieldOptions_JSType)(0),                            // 6: google.protobuf.FieldOptions.JSType
-	(FieldOptions_OptionRetention)(0),                   // 7: google.protobuf.FieldOptions.OptionRetention
-	(FieldOptions_OptionTargetType)(0),                  // 8: google.protobuf.FieldOptions.OptionTargetType
-	(MethodOptions_IdempotencyLevel)(0),                 // 9: google.protobuf.MethodOptions.IdempotencyLevel
-	(FeatureSet_FieldPresence)(0),                       // 10: google.protobuf.FeatureSet.FieldPresence
-	(FeatureSet_EnumType)(0),                            // 11: google.protobuf.FeatureSet.EnumType
-	(FeatureSet_RepeatedFieldEncoding)(0),               // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding
-	(FeatureSet_Utf8Validation)(0),                      // 13: google.protobuf.FeatureSet.Utf8Validation
-	(FeatureSet_MessageEncoding)(0),                     // 14: google.protobuf.FeatureSet.MessageEncoding
-	(FeatureSet_JsonFormat)(0),                          // 15: google.protobuf.FeatureSet.JsonFormat
-	(GeneratedCodeInfo_Annotation_Semantic)(0),          // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
-	(*FileDescriptorSet)(nil),                           // 17: google.protobuf.FileDescriptorSet
-	(*FileDescriptorProto)(nil),                         // 18: google.protobuf.FileDescriptorProto
-	(*DescriptorProto)(nil),                             // 19: google.protobuf.DescriptorProto
-	(*ExtensionRangeOptions)(nil),                       // 20: google.protobuf.ExtensionRangeOptions
-	(*FieldDescriptorProto)(nil),                        // 21: google.protobuf.FieldDescriptorProto
-	(*OneofDescriptorProto)(nil),                        // 22: google.protobuf.OneofDescriptorProto
-	(*EnumDescriptorProto)(nil),                         // 23: google.protobuf.EnumDescriptorProto
-	(*EnumValueDescriptorProto)(nil),                    // 24: google.protobuf.EnumValueDescriptorProto
-	(*ServiceDescriptorProto)(nil),                      // 25: google.protobuf.ServiceDescriptorProto
-	(*MethodDescriptorProto)(nil),                       // 26: google.protobuf.MethodDescriptorProto
-	(*FileOptions)(nil),                                 // 27: google.protobuf.FileOptions
-	(*MessageOptions)(nil),                              // 28: google.protobuf.MessageOptions
-	(*FieldOptions)(nil),                                // 29: google.protobuf.FieldOptions
-	(*OneofOptions)(nil),                                // 30: google.protobuf.OneofOptions
-	(*EnumOptions)(nil),                                 // 31: google.protobuf.EnumOptions
-	(*EnumValueOptions)(nil),                            // 32: google.protobuf.EnumValueOptions
-	(*ServiceOptions)(nil),                              // 33: google.protobuf.ServiceOptions
-	(*MethodOptions)(nil),                               // 34: google.protobuf.MethodOptions
-	(*UninterpretedOption)(nil),                         // 35: google.protobuf.UninterpretedOption
-	(*FeatureSet)(nil),                                  // 36: google.protobuf.FeatureSet
-	(*FeatureSetDefaults)(nil),                          // 37: google.protobuf.FeatureSetDefaults
-	(*SourceCodeInfo)(nil),                              // 38: google.protobuf.SourceCodeInfo
-	(*GeneratedCodeInfo)(nil),                           // 39: google.protobuf.GeneratedCodeInfo
-	(*DescriptorProto_ExtensionRange)(nil),              // 40: google.protobuf.DescriptorProto.ExtensionRange
-	(*DescriptorProto_ReservedRange)(nil),               // 41: google.protobuf.DescriptorProto.ReservedRange
-	(*ExtensionRangeOptions_Declaration)(nil),           // 42: google.protobuf.ExtensionRangeOptions.Declaration
-	(*EnumDescriptorProto_EnumReservedRange)(nil),       // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
-	(*FieldOptions_EditionDefault)(nil),                 // 44: google.protobuf.FieldOptions.EditionDefault
-	(*FieldOptions_FeatureSupport)(nil),                 // 45: google.protobuf.FieldOptions.FeatureSupport
-	(*UninterpretedOption_NamePart)(nil),                // 46: google.protobuf.UninterpretedOption.NamePart
-	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
-	(*SourceCodeInfo_Location)(nil),                     // 48: google.protobuf.SourceCodeInfo.Location
-	(*GeneratedCodeInfo_Annotation)(nil),                // 49: google.protobuf.GeneratedCodeInfo.Annotation
+	(Edition)(0),          // 0: google.protobuf.Edition
+	(SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility
+	(ExtensionRangeOptions_VerificationState)(0),              // 2: google.protobuf.ExtensionRangeOptions.VerificationState
+	(FieldDescriptorProto_Type)(0),                            // 3: google.protobuf.FieldDescriptorProto.Type
+	(FieldDescriptorProto_Label)(0),                           // 4: google.protobuf.FieldDescriptorProto.Label
+	(FileOptions_OptimizeMode)(0),                             // 5: google.protobuf.FileOptions.OptimizeMode
+	(FieldOptions_CType)(0),                                   // 6: google.protobuf.FieldOptions.CType
+	(FieldOptions_JSType)(0),                                  // 7: google.protobuf.FieldOptions.JSType
+	(FieldOptions_OptionRetention)(0),                         // 8: google.protobuf.FieldOptions.OptionRetention
+	(FieldOptions_OptionTargetType)(0),                        // 9: google.protobuf.FieldOptions.OptionTargetType
+	(MethodOptions_IdempotencyLevel)(0),                       // 10: google.protobuf.MethodOptions.IdempotencyLevel
+	(FeatureSet_FieldPresence)(0),                             // 11: google.protobuf.FeatureSet.FieldPresence
+	(FeatureSet_EnumType)(0),                                  // 12: google.protobuf.FeatureSet.EnumType
+	(FeatureSet_RepeatedFieldEncoding)(0),                     // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding
+	(FeatureSet_Utf8Validation)(0),                            // 14: google.protobuf.FeatureSet.Utf8Validation
+	(FeatureSet_MessageEncoding)(0),                           // 15: google.protobuf.FeatureSet.MessageEncoding
+	(FeatureSet_JsonFormat)(0),                                // 16: google.protobuf.FeatureSet.JsonFormat
+	(FeatureSet_EnforceNamingStyle)(0),                        // 17: google.protobuf.FeatureSet.EnforceNamingStyle
+	(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+	(GeneratedCodeInfo_Annotation_Semantic)(0),                // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+	(*FileDescriptorSet)(nil),                                 // 20: google.protobuf.FileDescriptorSet
+	(*FileDescriptorProto)(nil),                               // 21: google.protobuf.FileDescriptorProto
+	(*DescriptorProto)(nil),                                   // 22: google.protobuf.DescriptorProto
+	(*ExtensionRangeOptions)(nil),                             // 23: google.protobuf.ExtensionRangeOptions
+	(*FieldDescriptorProto)(nil),                              // 24: google.protobuf.FieldDescriptorProto
+	(*OneofDescriptorProto)(nil),                              // 25: google.protobuf.OneofDescriptorProto
+	(*EnumDescriptorProto)(nil),                               // 26: google.protobuf.EnumDescriptorProto
+	(*EnumValueDescriptorProto)(nil),                          // 27: google.protobuf.EnumValueDescriptorProto
+	(*ServiceDescriptorProto)(nil),                            // 28: google.protobuf.ServiceDescriptorProto
+	(*MethodDescriptorProto)(nil),                             // 29: google.protobuf.MethodDescriptorProto
+	(*FileOptions)(nil),                                       // 30: google.protobuf.FileOptions
+	(*MessageOptions)(nil),                                    // 31: google.protobuf.MessageOptions
+	(*FieldOptions)(nil),                                      // 32: google.protobuf.FieldOptions
+	(*OneofOptions)(nil),                                      // 33: google.protobuf.OneofOptions
+	(*EnumOptions)(nil),                                       // 34: google.protobuf.EnumOptions
+	(*EnumValueOptions)(nil),                                  // 35: google.protobuf.EnumValueOptions
+	(*ServiceOptions)(nil),                                    // 36: google.protobuf.ServiceOptions
+	(*MethodOptions)(nil),                                     // 37: google.protobuf.MethodOptions
+	(*UninterpretedOption)(nil),                               // 38: google.protobuf.UninterpretedOption
+	(*FeatureSet)(nil),                                        // 39: google.protobuf.FeatureSet
+	(*FeatureSetDefaults)(nil),                                // 40: google.protobuf.FeatureSetDefaults
+	(*SourceCodeInfo)(nil),                                    // 41: google.protobuf.SourceCodeInfo
+	(*GeneratedCodeInfo)(nil),                                 // 42: google.protobuf.GeneratedCodeInfo
+	(*DescriptorProto_ExtensionRange)(nil),                    // 43: google.protobuf.DescriptorProto.ExtensionRange
+	(*DescriptorProto_ReservedRange)(nil),                     // 44: google.protobuf.DescriptorProto.ReservedRange
+	(*ExtensionRangeOptions_Declaration)(nil),                 // 45: google.protobuf.ExtensionRangeOptions.Declaration
+	(*EnumDescriptorProto_EnumReservedRange)(nil),             // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange
+	(*FieldOptions_EditionDefault)(nil),                       // 47: google.protobuf.FieldOptions.EditionDefault
+	(*FieldOptions_FeatureSupport)(nil),                       // 48: google.protobuf.FieldOptions.FeatureSupport
+	(*UninterpretedOption_NamePart)(nil),                      // 49: google.protobuf.UninterpretedOption.NamePart
+	(*FeatureSet_VisibilityFeature)(nil),                      // 50: google.protobuf.FeatureSet.VisibilityFeature
+	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil),       // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+	(*SourceCodeInfo_Location)(nil),                           // 52: google.protobuf.SourceCodeInfo.Location
+	(*GeneratedCodeInfo_Annotation)(nil),                      // 53: google.protobuf.GeneratedCodeInfo.Annotation
 }
 var file_google_protobuf_descriptor_proto_depIdxs = []int32{
-	18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
-	19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
-	23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
-	25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
-	21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
-	27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
-	38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+	21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+	22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+	26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+	28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+	24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+	30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+	41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
 	0,  // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
-	21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
-	21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
-	19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
-	23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
-	40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
-	22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
-	28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
-	41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
-	35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
-	36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
-	1,  // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
-	3,  // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
-	2,  // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
-	29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
-	30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
-	24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
-	31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
-	43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
-	32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
-	26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
-	33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
-	34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
-	4,  // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
-	36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	5,  // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
-	6,  // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
-	7,  // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
-	8,  // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
-	44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
-	36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
-	45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
-	35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
-	45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
-	35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	9,  // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
-	36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
-	10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
-	11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
-	12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
-	13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
-	14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
-	15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
-	47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
-	0,  // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
-	0,  // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
-	48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
-	49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
-	20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
-	0,  // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
-	0,  // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
-	0,  // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
-	0,  // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
-	0,  // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
-	36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
-	36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
-	16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
-	77, // [77:77] is the sub-list for method output_type
-	77, // [77:77] is the sub-list for method input_type
-	77, // [77:77] is the sub-list for extension type_name
-	77, // [77:77] is the sub-list for extension extendee
-	0,  // [0:77] is the sub-list for field type_name
+	24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+	24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+	22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+	26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+	43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+	25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+	31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+	44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+	1,  // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+	38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
+	39, // 19: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
+	2,  // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
+	4,  // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
+	3,  // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
+	32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+	33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+	27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+	34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+	46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+	1,  // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+	35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+	29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+	36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+	37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+	5,  // 33: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
+	39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	6,  // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
+	7,  // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
+	8,  // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
+	9,  // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
+	47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
+	39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
+	48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+	38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+	48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+	38, // 52: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+	39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+	11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
+	12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
+	13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
+	14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
+	15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
+	16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
+	17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle
+	18, // 66: google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+	51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+	0,  // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+	0,  // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+	52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+	53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+	23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+	0,  // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+	0,  // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
+	0,  // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
+	0,  // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
+	0,  // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+	39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
+	39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
+	19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+	81, // [81:81] is the sub-list for method output_type
+	81, // [81:81] is the sub-list for method input_type
+	81, // [81:81] is the sub-list for extension type_name
+	81, // [81:81] is the sub-list for extension extendee
+	0,  // [0:81] is the sub-list for field type_name
 }
 
 func init() { file_google_protobuf_descriptor_proto_init() }
@@ -5294,8 +5222,8 @@ func file_google_protobuf_descriptor_proto_init() {
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)),
-			NumEnums:      17,
-			NumMessages:   33,
+			NumEnums:      20,
+			NumMessages:   34,
 			NumExtensions: 0,
 			NumServices:   0,
 		},
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index 28d24bad79..37e712b6b7 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -228,63 +228,29 @@ var (
 
 var File_google_protobuf_go_features_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_go_features_proto_rawDesc = string([]byte{
-	0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67,
-	0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73,
-	0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01,
-	0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72,
-	0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18,
-	0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65,
-	0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
-	0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c,
-	0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61,
-	0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
-	0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
-	0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69,
-	0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70,
-	0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49,
-	0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01,
-	0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55,
-	0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f,
-	0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2,
-	0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
-	0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72,
-	0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e,
-	0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70,
-	0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98,
-	0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52,
-	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
-	0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74,
-	0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a,
-	0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49,
-	0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
-	0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e,
-	0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44,
-	0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45,
-	0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d,
-	0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f,
-	0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50,
-	0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52,
-	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
-	0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45,
-	0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52,
-	0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54,
-	0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f,
-	0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
-})
+const file_google_protobuf_go_features_proto_rawDesc = "" +
+	"\n" +
+	"!google/protobuf/go_features.proto\x12\x02pb\x1a google/protobuf/descriptor.proto\"\xab\x05\n" +
+	"\n" +
+	"GoFeatures\x12\xbe\x01\n" +
+	"\x1alegacy_unmarshal_json_enum\x18\x01 \x01(\bB\x80\x01\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\t\x12\x04true\x18\x84\a\xa2\x01\n" +
+	"\x12\x05false\x18\xe7\a\xb2\x01[\b\xe8\a\x10\xe8\a\x1aSThe legacy UnmarshalJSON API is deprecated and will be removed in a future edition.R\x17legacyUnmarshalJsonEnum\x12t\n" +
+	"\tapi_level\x18\x02 \x01(\x0e2\x17.pb.GoFeatures.APILevelB>\x88\x01\x01\x98\x01\x03\x98\x01\x01\xa2\x01\x1a\x12\x15API_LEVEL_UNSPECIFIED\x18\x84\a\xa2\x01\x0f\x12\n" +
+	"API_OPAQUE\x18\xe9\a\xb2\x01\x03\b\xe8\aR\bapiLevel\x12|\n" +
+	"\x11strip_enum_prefix\x18\x03 \x01(\x0e2\x1e.pb.GoFeatures.StripEnumPrefixB0\x88\x01\x01\x98\x01\x06\x98\x01\a\x98\x01\x01\xa2\x01\x1b\x12\x16STRIP_ENUM_PREFIX_KEEP\x18\x84\a\xb2\x01\x03\b\xe9\aR\x0fstripEnumPrefix\"S\n" +
+	"\bAPILevel\x12\x19\n" +
+	"\x15API_LEVEL_UNSPECIFIED\x10\x00\x12\f\n" +
+	"\bAPI_OPEN\x10\x01\x12\x0e\n" +
+	"\n" +
+	"API_HYBRID\x10\x02\x12\x0e\n" +
+	"\n" +
+	"API_OPAQUE\x10\x03\"\x92\x01\n" +
+	"\x0fStripEnumPrefix\x12!\n" +
+	"\x1dSTRIP_ENUM_PREFIX_UNSPECIFIED\x10\x00\x12\x1a\n" +
+	"\x16STRIP_ENUM_PREFIX_KEEP\x10\x01\x12#\n" +
+	"\x1fSTRIP_ENUM_PREFIX_GENERATE_BOTH\x10\x02\x12\x1b\n" +
+	"\x17STRIP_ENUM_PREFIX_STRIP\x10\x03:<\n" +
+	"\x02go\x12\x1b.google.protobuf.FeatureSet\x18\xea\a \x01(\v2\x0e.pb.GoFeaturesR\x02goB/Z-google.golang.org/protobuf/types/gofeaturespb"
 
 var (
 	file_google_protobuf_go_features_proto_rawDescOnce sync.Once
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 497da66e91..1ff0d1494d 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -412,23 +412,13 @@ func (x *Any) GetValue() []byte {
 
 var File_google_protobuf_any_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_any_proto_rawDesc = string([]byte{
-	0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
-	0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
-	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f,
-	0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
-	0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_any_proto_rawDesc = "" +
+	"\n" +
+	"\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" +
+	"\x03Any\x12\x19\n" +
+	"\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" +
+	"\x05value\x18\x02 \x01(\fR\x05valueBv\n" +
+	"\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_any_proto_rawDescOnce sync.Once
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index 193880d181..ca2e7b38f4 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -289,24 +289,13 @@ func (x *Duration) GetNanos() int32 {
 
 var File_google_protobuf_duration_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_duration_proto_rawDesc = string([]byte{
-	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
-	0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
-	0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01,
-	0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
-	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64,
-	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47,
-	0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
-	0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_duration_proto_rawDesc = "" +
+	"\n" +
+	"\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n" +
+	"\bDuration\x12\x18\n" +
+	"\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
+	"\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n" +
+	"\x13com.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_duration_proto_rawDescOnce sync.Once
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
index a5b8657c4b..1d7ee3b476 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
@@ -86,20 +86,12 @@ func (*Empty) Descriptor() ([]byte, []int) {
 
 var File_google_protobuf_empty_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_empty_proto_rawDesc = string([]byte{
-	0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07,
-	0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a,
-	0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b,
-	0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2,
-	0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77,
-	0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_empty_proto_rawDesc = "" +
+	"\n" +
+	"\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\a\n" +
+	"\x05EmptyB}\n" +
+	"\x13com.google.protobufB\n" +
+	"EmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_empty_proto_rawDescOnce sync.Once
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
index 041feb0f3e..91ee89a5cd 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -504,23 +504,12 @@ func (x *FieldMask) GetPaths() []string {
 
 var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_field_mask_proto_rawDesc = string([]byte{
-	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
-	0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
-	0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e,
-	0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
-	0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
-	0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70,
-	0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61,
-	0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
-	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_field_mask_proto_rawDesc = "" +
+	"\n" +
+	" google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"!\n" +
+	"\tFieldMask\x12\x14\n" +
+	"\x05paths\x18\x01 \x03(\tR\x05pathsB\x85\x01\n" +
+	"\x13com.google.protobufB\x0eFieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
index ecdd31ab53..30411b7283 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -672,55 +672,31 @@ func (x *ListValue) GetValues() []*Value {
 
 var File_google_protobuf_struct_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_struct_proto_rawDesc = string([]byte{
-	0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
-	0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69,
-	0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
-	0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
-	0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c,
-	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75,
-	0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65,
-	0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
-	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b,
-	0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62,
-	0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48,
-	0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c,
-	0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
-	0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73,
-	0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69,
-	0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69,
-	0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22,
-	0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06,
-	0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09,
-	0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c,
-	0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
-	0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
-	0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
-	0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62,
-	0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
-	0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x33,
-})
+const file_google_protobuf_struct_proto_rawDesc = "" +
+	"\n" +
+	"\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n" +
+	"\x06Struct\x12;\n" +
+	"\x06fields\x18\x01 \x03(\v2#.google.protobuf.Struct.FieldsEntryR\x06fields\x1aQ\n" +
+	"\vFieldsEntry\x12\x10\n" +
+	"\x03key\x18\x01 \x01(\tR\x03key\x12,\n" +
+	"\x05value\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x05value:\x028\x01\"\xb2\x02\n" +
+	"\x05Value\x12;\n" +
+	"\n" +
+	"null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n" +
+	"\fnumber_value\x18\x02 \x01(\x01H\x00R\vnumberValue\x12#\n" +
+	"\fstring_value\x18\x03 \x01(\tH\x00R\vstringValue\x12\x1f\n" +
+	"\n" +
+	"bool_value\x18\x04 \x01(\bH\x00R\tboolValue\x12<\n" +
+	"\fstruct_value\x18\x05 \x01(\v2\x17.google.protobuf.StructH\x00R\vstructValue\x12;\n" +
+	"\n" +
+	"list_value\x18\x06 \x01(\v2\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n" +
+	"\x04kind\";\n" +
+	"\tListValue\x12.\n" +
+	"\x06values\x18\x01 \x03(\v2\x16.google.protobuf.ValueR\x06values*\x1b\n" +
+	"\tNullValue\x12\x0e\n" +
+	"\n" +
+	"NULL_VALUE\x10\x00B\x7f\n" +
+	"\x13com.google.protobufB\vStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_struct_proto_rawDescOnce sync.Once
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 00ac835c0b..06d584c14b 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -298,24 +298,13 @@ func (x *Timestamp) GetNanos() int32 {
 
 var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{
-	0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
-	0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
-	0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
-	0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
-	0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
-	0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
-	0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
-	0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
-	0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_timestamp_proto_rawDesc = "" +
+	"\n" +
+	"\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" +
+	"\tTimestamp\x12\x18\n" +
+	"\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
+	"\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" +
+	"\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
index 5de5301063..b7c2d0607d 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -28,10 +28,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
-// Wrappers for primitive (non-message) types. These types are useful
-// for embedding primitives in the `google.protobuf.Any` type and for places
-// where we need to distinguish between the absence of a primitive
-// typed field and its default value.
+// Wrappers for primitive (non-message) types. These types were needed
+// for legacy reasons and are not recommended for use in new APIs.
+//
+// Historically these wrappers were useful to have presence on proto3 primitive
+// fields, but proto3 syntax has been updated to support the `optional` keyword.
+// Using that keyword is now the strongly preferred way to add presence to
+// proto3 primitive fields.
+//
+// A secondary use case was to embed primitives in the `google.protobuf.Any`
+// type: it is now recommended that you embed your value in your own wrapper
+// message which can be specifically documented.
 //
 // These wrappers have no meaningful use within repeated fields as they lack
 // the ability to detect presence on individual elements.
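
As the updated package comment says, these wrappers exist to give primitive fields presence, and proto3 `optional` is now the preferred way to get the same effect. A minimal sketch of the presence semantics the wrappers provide (generated getters are nil-safe):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// A nil *BoolValue means "unset"; an allocated one means "explicitly set",
	// even when the wrapped value equals the proto3 default.
	var unset *wrapperspb.BoolValue
	set := wrapperspb.Bool(false)

	fmt.Println(unset.GetValue(), set.GetValue()) // false false: same value
	fmt.Println(unset == nil, set == nil)         // true false: different presence
}
```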
@@ -54,6 +61,9 @@ import (
 // Wrapper message for `double`.
 //
 // The JSON representation for `DoubleValue` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
 type DoubleValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The double value.
@@ -107,6 +117,9 @@ func (x *DoubleValue) GetValue() float64 {
 // Wrapper message for `float`.
 //
 // The JSON representation for `FloatValue` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
 type FloatValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The float value.
@@ -160,6 +173,9 @@ func (x *FloatValue) GetValue() float32 {
 // Wrapper message for `int64`.
 //
 // The JSON representation for `Int64Value` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
 type Int64Value struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The int64 value.
@@ -213,6 +229,9 @@ func (x *Int64Value) GetValue() int64 {
 // Wrapper message for `uint64`.
 //
 // The JSON representation for `UInt64Value` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
 type UInt64Value struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The uint64 value.
@@ -266,6 +285,9 @@ func (x *UInt64Value) GetValue() uint64 {
 // Wrapper message for `int32`.
 //
 // The JSON representation for `Int32Value` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
 type Int32Value struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The int32 value.
@@ -319,6 +341,9 @@ func (x *Int32Value) GetValue() int32 {
 // Wrapper message for `uint32`.
 //
 // The JSON representation for `UInt32Value` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
 type UInt32Value struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The uint32 value.
@@ -372,6 +397,9 @@ func (x *UInt32Value) GetValue() uint32 {
 // Wrapper message for `bool`.
 //
 // The JSON representation for `BoolValue` is JSON `true` and `false`.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
 type BoolValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The bool value.
@@ -425,6 +453,9 @@ func (x *BoolValue) GetValue() bool {
 // Wrapper message for `string`.
 //
 // The JSON representation for `StringValue` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
 type StringValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The string value.
@@ -478,6 +509,9 @@ func (x *StringValue) GetValue() string {
 // Wrapper message for `bytes`.
 //
 // The JSON representation for `BytesValue` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
 type BytesValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The bytes value.
@@ -530,41 +564,32 @@ func (x *BytesValue) GetValue() []byte {
 
 var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{
-	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
-	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
-	0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
-	0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
-	0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
-	0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
-	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
-	0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
-	0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
-	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
-	0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
-	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
-	0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
-	0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
-	0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
-	0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
-	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_wrappers_proto_rawDesc = "" +
+	"\n" +
+	"\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"#\n" +
+	"\vDoubleValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x01R\x05value\"\"\n" +
+	"\n" +
+	"FloatValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x02R\x05value\"\"\n" +
+	"\n" +
+	"Int64Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x03R\x05value\"#\n" +
+	"\vUInt64Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x04R\x05value\"\"\n" +
+	"\n" +
+	"Int32Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x05R\x05value\"#\n" +
+	"\vUInt32Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\rR\x05value\"!\n" +
+	"\tBoolValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\bR\x05value\"#\n" +
+	"\vStringValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\tR\x05value\"\"\n" +
+	"\n" +
+	"BytesValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\fR\x05valueB\x83\x01\n" +
+	"\x13com.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
diff --git a/hack/tools/vendor/k8s.io/client-go/util/cert/cert.go b/hack/tools/vendor/k8s.io/client-go/util/cert/cert.go
index 91e171271a..4805d09ab5 100644
--- a/hack/tools/vendor/k8s.io/client-go/util/cert/cert.go
+++ b/hack/tools/vendor/k8s.io/client-go/util/cert/cert.go
@@ -75,13 +75,15 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro
 			CommonName:   cfg.CommonName,
 			Organization: cfg.Organization,
 		},
-		DNSNames:              []string{cfg.CommonName},
 		NotBefore:             notBefore,
 		NotAfter:              now.Add(duration365d * 10).UTC(),
 		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
 		BasicConstraintsValid: true,
 		IsCA:                  true,
 	}
+	if len(cfg.CommonName) > 0 {
+		tmpl.DNSNames = []string{cfg.CommonName}
+	}
 
 	certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
 	if err != nil {
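
The client-go change above stops emitting a dNSName SAN when the CA config has no CommonName, so an empty string never lands in the SAN list. A self-contained sketch of the same guard (the names here are illustrative, not the vendored API):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"
)

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	commonName := "" // e.g. a CA created without an explicit CN
	tmpl := x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: commonName},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour),
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	// Mirrors the patched behavior: only add the CN as a SAN when it is
	// actually set, so we never emit an empty dNSName entry.
	if len(commonName) > 0 {
		tmpl.DNSNames = []string{commonName}
	}

	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, key.Public(), key)
	fmt.Println(len(der) > 0, err)
}
```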
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go
index 0ce85af9f1..081dae306f 100644
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go
@@ -285,10 +285,7 @@ func (o *openAPI) buildOpenAPISpec(webServices []common.RouteContainer) error {
 			sortParameters(pathItem.Parameters)
 
 			for _, route := range routes {
-				op, err := o.buildOperations(route, inPathCommonParamsMap)
-				if err != nil {
-					return err
-				}
+				op, _ := o.buildOperations(route, inPathCommonParamsMap)
 				sortParameters(op.Parameters)
 
 				switch strings.ToUpper(route.Method()) {
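
The hunk above replaces `op, err := ...; if err != nil { return err }` with `op, _ := ...`, so a failing buildOperations call no longer aborts spec generation. A toy sketch of the behavioral difference (buildOperation is a hypothetical stand-in):

```go
package main

import (
	"errors"
	"fmt"
)

// buildOperation stands in for o.buildOperations.
func buildOperation(fail bool) (string, error) {
	if fail {
		return "", errors.New("bad route")
	}
	return "GET /pods", nil
}

func main() {
	// Previous behavior: a failing route aborts spec building.
	if _, err := buildOperation(true); err != nil {
		fmt.Println("abort:", err)
	}

	// Behavior after this hunk: the error is discarded and the zero-value
	// operation flows onward; failures become silent.
	op, _ := buildOperation(true)
	fmt.Printf("continue with %q\n", op)
}
```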
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/markers.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/markers.go
index a8af60b6cf..7f0fe985a6 100644
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/markers.go
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/markers.go
@@ -20,11 +20,9 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"reflect"
 	"regexp"
 	"strconv"
 	"strings"
-	"sync"
 
 	"k8s.io/gengo/v2/types"
 	openapi "k8s.io/kube-openapi/pkg/common"
@@ -63,34 +61,6 @@ func (c *CELTag) Validate() error {
 	return nil
 }
 
-// isKnownTagCommentKey returns true if the given key is a known comment tag key.
-// Known keys are identified by the json field tags in the commentTags struct.
-// If the key is a composite key, only the first key part is checked, and is
-// expected to be separated by the remainder of the key by a ':' or '[' delimiter.
-func isKnownTagCommentKey(key string) bool {
-	split := func(r rune) bool { return r == ':' || r == '[' }
-	commentTags := strings.FieldsFunc(key, split)
-	if len(commentTags) == 0 {
-		return false
-	}
-	_, ok := tagKeys()[commentTags[0]]
-	return ok
-}
-
-var tagKeys = sync.OnceValue(func() map[string]struct{} {
-	result := map[string]struct{}{}
-	t := reflect.TypeOf(commentTags{})
-	for i := 0; i < t.NumField(); i++ {
-		field := t.Field(i)
-		if jsonTag := field.Tag.Get("json"); jsonTag != "" {
-			if key, _, _ := strings.Cut(jsonTag, ","); key != "" {
-				result[key] = struct{}{}
-			}
-		}
-	}
-	return result
-})
-
 // commentTags represents the parsed comment tags for a given type. These types are then used to generate schema validations.
 // These only include the newer prefixed tags. The older tags are still supported,
 // but are not included in this struct. Comment Tags are transformed into a
@@ -105,29 +75,7 @@ var tagKeys = sync.OnceValue(func() map[string]struct{} {
 // - +listMapKeys
 // - +mapType
 type commentTags struct {
-	Nullable         *bool         `json:"nullable,omitempty"`
-	Format           *string       `json:"format,omitempty"`
-	Maximum          *float64      `json:"maximum,omitempty"`
-	ExclusiveMaximum *bool         `json:"exclusiveMaximum,omitempty"`
-	Minimum          *float64      `json:"minimum,omitempty"`
-	ExclusiveMinimum *bool         `json:"exclusiveMinimum,omitempty"`
-	MaxLength        *int64        `json:"maxLength,omitempty"`
-	MinLength        *int64        `json:"minLength,omitempty"`
-	Pattern          *string       `json:"pattern,omitempty"`
-	MaxItems         *int64        `json:"maxItems,omitempty"`
-	MinItems         *int64        `json:"minItems,omitempty"`
-	UniqueItems      *bool         `json:"uniqueItems,omitempty"`
-	MultipleOf       *float64      `json:"multipleOf,omitempty"`
-	Enum             []interface{} `json:"enum,omitempty"`
-	MaxProperties    *int64        `json:"maxProperties,omitempty"`
-	MinProperties    *int64        `json:"minProperties,omitempty"`
-
-	// Nested commentTags for extending the schemas of subfields at point-of-use
-	// when you cant annotate them directly. Cannot be used to add properties
-	// or remove validations on the overridden schema.
-	Items                *commentTags            `json:"items,omitempty"`
-	Properties           map[string]*commentTags `json:"properties,omitempty"`
-	AdditionalProperties *commentTags            `json:"additionalProperties,omitempty"`
+	spec.SchemaProps
 
 	CEL []CELTag `json:"cel,omitempty"`
 
@@ -138,75 +86,9 @@ type commentTags struct {
 
 // Returns the schema for the given CommentTags instance.
 // This is the final authoritative schema for the comment tags
-func (c *commentTags) ValidationSchema() (*spec.Schema, error) {
-	if c == nil {
-		return nil, nil
-	}
-
-	isNullable := c.Nullable != nil && *c.Nullable
-	format := ""
-	if c.Format != nil {
-		format = *c.Format
-	}
-	isExclusiveMaximum := c.ExclusiveMaximum != nil && *c.ExclusiveMaximum
-	isExclusiveMinimum := c.ExclusiveMinimum != nil && *c.ExclusiveMinimum
-	isUniqueItems := c.UniqueItems != nil && *c.UniqueItems
-	pattern := ""
-	if c.Pattern != nil {
-		pattern = *c.Pattern
-	}
-
-	var transformedItems *spec.SchemaOrArray
-	var transformedProperties map[string]spec.Schema
-	var transformedAdditionalProperties *spec.SchemaOrBool
-
-	if c.Items != nil {
-		items, err := c.Items.ValidationSchema()
-		if err != nil {
-			return nil, fmt.Errorf("failed to transform items: %w", err)
-		}
-		transformedItems = &spec.SchemaOrArray{Schema: items}
-	}
-
-	if c.Properties != nil {
-		properties := make(map[string]spec.Schema)
-		for key, value := range c.Properties {
-			property, err := value.ValidationSchema()
-			if err != nil {
-				return nil, fmt.Errorf("failed to transform property %q: %w", key, err)
-			}
-			properties[key] = *property
-		}
-		transformedProperties = properties
-	}
-
-	if c.AdditionalProperties != nil {
-		additionalProperties, err := c.AdditionalProperties.ValidationSchema()
-		if err != nil {
-			return nil, fmt.Errorf("failed to transform additionalProperties: %w", err)
-		}
-		transformedAdditionalProperties = &spec.SchemaOrBool{Schema: additionalProperties, Allows: true}
-	}
-
+func (c commentTags) ValidationSchema() (*spec.Schema, error) {
 	res := spec.Schema{
-		SchemaProps: spec.SchemaProps{
-			Nullable:         isNullable,
-			Format:           format,
-			Maximum:          c.Maximum,
-			ExclusiveMaximum: isExclusiveMaximum,
-			Minimum:          c.Minimum,
-			ExclusiveMinimum: isExclusiveMinimum,
-			MaxLength:        c.MaxLength,
-			MinLength:        c.MinLength,
-			Pattern:          pattern,
-			MaxItems:         c.MaxItems,
-			MinItems:         c.MinItems,
-			UniqueItems:      isUniqueItems,
-			MultipleOf:       c.MultipleOf,
-			Enum:             c.Enum,
-			MaxProperties:    c.MaxProperties,
-			MinProperties:    c.MinProperties,
-		},
+		SchemaProps: c.SchemaProps,
 	}
 
 	if len(c.CEL) > 0 {
@@ -223,18 +105,6 @@ func (c *commentTags) ValidationSchema() (*spec.Schema, error) {
 		res.VendorExtensible.AddExtension("x-kubernetes-validations", celTagMap)
 	}
 
-	// Dont add structural properties directly to this schema. This schema
-	// is used only for validation.
-	if transformedItems != nil || len(transformedProperties) > 0 || transformedAdditionalProperties != nil {
-		res.AllOf = append(res.AllOf, spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Items:                transformedItems,
-				Properties:           transformedProperties,
-				AdditionalProperties: transformedAdditionalProperties,
-			},
-		})
-	}
-
 	return &res, nil
 }
 
@@ -264,7 +134,7 @@ func (c commentTags) Validate() error {
 	if c.Minimum != nil && c.Maximum != nil && *c.Minimum > *c.Maximum {
 		err = errors.Join(err, fmt.Errorf("minimum %f is greater than maximum %f", *c.Minimum, *c.Maximum))
 	}
-	if (c.ExclusiveMinimum != nil || c.ExclusiveMaximum != nil) && c.Minimum != nil && c.Maximum != nil && *c.Minimum == *c.Maximum {
+	if (c.ExclusiveMinimum || c.ExclusiveMaximum) && c.Minimum != nil && c.Maximum != nil && *c.Minimum == *c.Maximum {
 		err = errors.Join(err, fmt.Errorf("exclusiveMinimum/Maximum cannot be set when minimum == maximum"))
 	}
 	if c.MinLength != nil && c.MaxLength != nil && *c.MinLength > *c.MaxLength {
@@ -276,10 +146,10 @@ func (c commentTags) Validate() error {
 	if c.MinProperties != nil && c.MaxProperties != nil && *c.MinProperties > *c.MaxProperties {
 		err = errors.Join(err, fmt.Errorf("minProperties %d is greater than maxProperties %d", *c.MinProperties, *c.MaxProperties))
 	}
-	if c.Pattern != nil {
-		_, e := regexp.Compile(*c.Pattern)
+	if c.Pattern != "" {
+		_, e := regexp.Compile(c.Pattern)
 		if e != nil {
-			err = errors.Join(err, fmt.Errorf("invalid pattern %q: %v", *c.Pattern, e))
+			err = errors.Join(err, fmt.Errorf("invalid pattern %q: %v", c.Pattern, e))
 		}
 	}
 	if c.MultipleOf != nil && *c.MultipleOf == 0 {
@@ -305,23 +175,10 @@ func (c commentTags) ValidateType(t *types.Type) error {
 	typeString, _ := openapi.OpenAPITypeFormat(resolvedType.String()) // will be empty for complicated types
 
 	// Structs and interfaces may dynamically be any type, so we cant validate them
-	// easily.
+	// easily. We may be able to do so if we check that they don't implement
+	// all the override functions, but for now we just skip them.
 	if resolvedType.Kind == types.Interface || resolvedType.Kind == types.Struct {
-		// Skip validation for structs and interfaces which implement custom
-		// overrides
-		//
-		// Only check top-level t type without resolving alias to mirror generator
-		// behavior. Generator only checks the top level type without resolving
-		// alias. The `has*Method` functions can be changed to add this behavior in the
-		// future if needed.
-		elemT := resolvePtrType(t)
-		if hasOpenAPIDefinitionMethod(elemT) ||
-			hasOpenAPIDefinitionMethods(elemT) ||
-			hasOpenAPIV3DefinitionMethod(elemT) ||
-			hasOpenAPIV3OneOfMethod(elemT) {
-
-			return nil
-		}
+		return nil
 	}
 
 	isArray := resolvedType.Kind == types.Slice || resolvedType.Kind == types.Array
@@ -329,7 +186,6 @@ func (c commentTags) ValidateType(t *types.Type) error {
 	isString := typeString == "string"
 	isInt := typeString == "integer"
 	isFloat := typeString == "number"
-	isStruct := resolvedType.Kind == types.Struct
 
 	if c.MaxItems != nil && !isArray {
 		err = errors.Join(err, fmt.Errorf("maxItems can only be used on array types"))
@@ -337,13 +193,13 @@ func (c commentTags) ValidateType(t *types.Type) error {
 	if c.MinItems != nil && !isArray {
 		err = errors.Join(err, fmt.Errorf("minItems can only be used on array types"))
 	}
-	if c.UniqueItems != nil && !isArray {
+	if c.UniqueItems && !isArray {
 		err = errors.Join(err, fmt.Errorf("uniqueItems can only be used on array types"))
 	}
-	if c.MaxProperties != nil && !(isMap || isStruct) {
+	if c.MaxProperties != nil && !isMap {
 		err = errors.Join(err, fmt.Errorf("maxProperties can only be used on map types"))
 	}
-	if c.MinProperties != nil && !(isMap || isStruct) {
+	if c.MinProperties != nil && !isMap {
 		err = errors.Join(err, fmt.Errorf("minProperties can only be used on map types"))
 	}
 	if c.MinLength != nil && !isString {
@@ -352,7 +208,7 @@ func (c commentTags) ValidateType(t *types.Type) error {
 	if c.MaxLength != nil && !isString {
 		err = errors.Join(err, fmt.Errorf("maxLength can only be used on string types"))
 	}
-	if c.Pattern != nil && !isString {
+	if c.Pattern != "" && !isString {
 		err = errors.Join(err, fmt.Errorf("pattern can only be used on string types"))
 	}
 	if c.Minimum != nil && !isInt && !isFloat {
@@ -364,62 +220,22 @@ func (c commentTags) ValidateType(t *types.Type) error {
 	if c.MultipleOf != nil && !isInt && !isFloat {
 		err = errors.Join(err, fmt.Errorf("multipleOf can only be used on numeric types"))
 	}
-	if c.ExclusiveMinimum != nil && !isInt && !isFloat {
+	if c.ExclusiveMinimum && !isInt && !isFloat {
 		err = errors.Join(err, fmt.Errorf("exclusiveMinimum can only be used on numeric types"))
 	}
-	if c.ExclusiveMaximum != nil && !isInt && !isFloat {
+	if c.ExclusiveMaximum && !isInt && !isFloat {
 		err = errors.Join(err, fmt.Errorf("exclusiveMaximum can only be used on numeric types"))
 	}
-	if c.AdditionalProperties != nil && !isMap {
-		err = errors.Join(err, fmt.Errorf("additionalProperties can only be used on map types"))
-
-		if err == nil {
-			err = errors.Join(err, c.AdditionalProperties.ValidateType(t))
-		}
-	}
-	if c.Items != nil && !isArray {
-		err = errors.Join(err, fmt.Errorf("items can only be used on array types"))
-
-		if err == nil {
-			err = errors.Join(err, c.Items.ValidateType(t))
-		}
-	}
-	if c.Properties != nil {
-		if !isStruct && !isMap {
-			err = errors.Join(err, fmt.Errorf("properties can only be used on struct types"))
-		} else if isStruct && err == nil {
-			for key, tags := range c.Properties {
-				if member := memberWithJSONName(resolvedType, key); member == nil {
-					err = errors.Join(err, fmt.Errorf("property used in comment tag %q not found in struct %s", key, resolvedType.String()))
-				} else if nestedErr := tags.ValidateType(member.Type); nestedErr != nil {
-					err = errors.Join(err, fmt.Errorf("failed to validate property %q: %w", key, nestedErr))
-				}
-			}
-		}
-	}
 
 	return err
 }
 
-func memberWithJSONName(t *types.Type, key string) *types.Member {
-	for _, member := range t.Members {
-		tags := getJsonTags(&member)
-		if len(tags) > 0 && tags[0] == key {
-			return &member
-		} else if member.Embedded {
-			if embeddedMember := memberWithJSONName(member.Type, key); embeddedMember != nil {
-				return embeddedMember
-			}
-		}
-	}
-	return nil
-}
-
-// ParseCommentTags parses the given comments into a CommentTags type. Validates the parsed comment tags, and returns the result.
+// Parses the given comments into a CommentTags type. Validates the parsed comment tags, and returns the result.
 // Accepts an optional type to validate against, and a prefix to filter out markers not related to validation.
 // Accepts a prefix to filter out markers not related to validation.
 // Returns any errors encountered while parsing or validating the comment tags.
 func ParseCommentTags(t *types.Type, comments []string, prefix string) (*spec.Schema, error) {
+
 	markers, err := parseMarkers(comments, prefix)
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse marker comments: %w", err)
@@ -639,8 +455,6 @@ func parseMarkers(markerComments []string, prefix string) (map[string]any, error
 
 		if len(key) == 0 {
 			return nil, fmt.Errorf("cannot have empty key for marker comment")
-		} else if !isKnownTagCommentKey(key) {
-			continue
 		} else if _, ok := parseSymbolReference(value, ""); ok {
 			// Skip ref markers
 			continue
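
The markers.go revert above collapses the hand-mirrored validation fields back into an embedded spec.SchemaProps, so the JSON keys of the marker payload map straight onto the schema struct instead of being copied field by field in ValidationSchema. A minimal sketch of the embedding pattern, with SchemaProps reduced to three stand-in fields:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// SchemaProps stands in for spec.SchemaProps; the real type lives in
// k8s.io/kube-openapi/pkg/validation/spec.
type SchemaProps struct {
	Minimum *float64 `json:"minimum,omitempty"`
	Maximum *float64 `json:"maximum,omitempty"`
	Pattern string   `json:"pattern,omitempty"`
}

// commentTags embeds the schema fields directly instead of mirroring each
// one by hand; encoding/json promotes the embedded fields and their tags.
type commentTags struct {
	SchemaProps
}

func main() {
	var c commentTags
	_ = json.Unmarshal([]byte(`{"minimum": 1, "pattern": "^a+$"}`), &c)
	fmt.Println(*c.Minimum, c.Pattern) // 1 ^a+$
}
```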
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go
index c5c0093818..743f5b8b2e 100644
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go
@@ -249,7 +249,7 @@ func methodReturnsValue(mt *types.Type, pkg, name string) bool {
 		return false
 	}
 	r := mt.Signature.Results[0]
-	return r.Type.Name.Name == name && r.Type.Name.Package == pkg
+	return r.Name.Name == name && r.Name.Package == pkg
 }
 
 func hasOpenAPIV3DefinitionMethod(t *types.Type) bool {
@@ -362,88 +362,6 @@ func (g openAPITypeWriter) generateCall(t *types.Type) error {
 	return g.Error()
 }
 
-// Generates Go code to represent an OpenAPI schema. May be refactored in
-// the future to take more responsibility as we transition from an on-line
-// approach to parsing the comments to spec.Schema
-func (g openAPITypeWriter) generateSchema(s *spec.Schema) error {
-	if !reflect.DeepEqual(s.SchemaProps, spec.SchemaProps{}) {
-		g.Do("SchemaProps: spec.SchemaProps{\n", nil)
-		err := g.generateValueValidations(&s.SchemaProps)
-		if err != nil {
-			return err
-		}
-
-		if len(s.Properties) > 0 {
-			g.Do("Properties: map[string]spec.Schema{\n", nil)
-
-			// Sort property names to generate deterministic output
-			keys := []string{}
-			for k := range s.Properties {
-				keys = append(keys, k)
-			}
-			sort.Strings(keys)
-
-			for _, k := range keys {
-				v := s.Properties[k]
-				g.Do("$.$: {\n", fmt.Sprintf("%#v", k))
-				err := g.generateSchema(&v)
-				if err != nil {
-					return err
-				}
-				g.Do("},\n", nil)
-			}
-			g.Do("},\n", nil)
-		}
-
-		if s.AdditionalProperties != nil && s.AdditionalProperties.Schema != nil {
-			g.Do("AdditionalProperties: &spec.SchemaOrBool{\n", nil)
-			g.Do("Allows: true,\n", nil)
-			g.Do("Schema: &spec.Schema{\n", nil)
-			err := g.generateSchema(s.AdditionalProperties.Schema)
-			if err != nil {
-				return err
-			}
-			g.Do("},\n", nil)
-			g.Do("},\n", nil)
-		}
-
-		if s.Items != nil && s.Items.Schema != nil {
-			g.Do("Items: &spec.SchemaOrArray{\n", nil)
-			g.Do("Schema: &spec.Schema{\n", nil)
-			err := g.generateSchema(s.Items.Schema)
-			if err != nil {
-				return err
-			}
-			g.Do("},\n", nil)
-			g.Do("},\n", nil)
-		}
-
-		g.Do("},\n", nil)
-	}
-
-	if len(s.Extensions) > 0 {
-		g.Do("VendorExtensible: spec.VendorExtensible{\nExtensions: spec.Extensions{\n", nil)
-
-		// Sort extension keys to generate deterministic output
-		keys := []string{}
-		for k := range s.Extensions {
-			keys = append(keys, k)
-		}
-		sort.Strings(keys)
-
-		for _, k := range keys {
-			v := s.Extensions[k]
-			g.Do("$.key$: $.value$,\n", map[string]interface{}{
-				"key":   fmt.Sprintf("%#v", k),
-				"value": fmt.Sprintf("%#v", v),
-			})
-		}
-		g.Do("},\n},\n", nil)
-	}
-
-	return nil
-}
-
 func (g openAPITypeWriter) generateValueValidations(vs *spec.SchemaProps) error {
 
 	if vs == nil {
@@ -502,18 +420,6 @@ func (g openAPITypeWriter) generateValueValidations(vs *spec.SchemaProps) error
 		g.Do("UniqueItems: true,\n", nil)
 	}
 
-	if len(vs.AllOf) > 0 {
-		g.Do("AllOf: []spec.Schema{\n", nil)
-		for _, s := range vs.AllOf {
-			g.Do("{\n", nil)
-			if err := g.generateSchema(&s); err != nil {
-				return err
-			}
-			g.Do("},\n", nil)
-		}
-		g.Do("},\n", nil)
-	}
-
 	return nil
 }
 
@@ -523,7 +429,7 @@ func (g openAPITypeWriter) generate(t *types.Type) error {
 	case types.Struct:
 		validationSchema, err := ParseCommentTags(t, t.CommentLines, markerPrefix)
 		if err != nil {
-			return fmt.Errorf("failed parsing comment tags for %v: %w", t.String(), err)
+			return err
 		}
 
 		hasV2Definition := hasOpenAPIDefinitionMethod(t)
@@ -738,15 +644,7 @@ func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union
 	}
 
 	if len(otherExtensions) > 0 {
-		// Sort extension keys to generate deterministic output
-		keys := []string{}
-		for k := range otherExtensions {
-			keys = append(keys, k)
-		}
-		sort.Strings(keys)
-
-		for _, k := range keys {
-			v := otherExtensions[k]
+		for k, v := range otherExtensions {
 			g.Do("$.key$: $.value$,\n", map[string]interface{}{
 				"key":   fmt.Sprintf("%#v", k),
 				"value": fmt.Sprintf("%#v", v),
@@ -806,7 +704,7 @@ func defaultFromComments(comments []string, commentPath string, t *types.Type) (
 
 	var i interface{}
 	if id, ok := parseSymbolReference(tag, commentPath); ok {
-		klog.V(5).Infof("%v, %v", id, commentPath)
+		klog.Errorf("%v, %v", id, commentPath)
 		return nil, &id, nil
 	} else if err := json.Unmarshal([]byte(tag), &i); err != nil {
 		return nil, nil, fmt.Errorf("failed to unmarshal default: %v", err)
@@ -946,9 +844,15 @@ func (g openAPITypeWriter) generateDescription(CommentLines []string) {
 		}
 	}
 
-	postDoc := strings.TrimSpace(buffer.String())
-	if len(postDoc) > 0 {
-		g.Do("Description: $.$,\n", fmt.Sprintf("%#v", postDoc))
+	postDoc := strings.TrimLeft(buffer.String(), "\n")
+	postDoc = strings.TrimRight(postDoc, "\n")
+	postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to "
+	postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape "
+	postDoc = strings.Replace(postDoc, "\n", "\\n", -1)
+	postDoc = strings.Replace(postDoc, "\t", "\\t", -1)
+	postDoc = strings.Trim(postDoc, " ")
+	if postDoc != "" {
+		g.Do("Description: \"$.$\",\n", postDoc)
 	}
 }
 
@@ -1030,17 +934,6 @@ func (g openAPITypeWriter) generateReferenceProperty(t *types.Type) {
 	g.Do("Ref: ref(\"$.$\"),\n", t.Name.String())
 }
 
-func resolvePtrType(t *types.Type) *types.Type {
-	var prev *types.Type
-	for prev != t {
-		prev = t
-		if t.Kind == types.Pointer {
-			t = t.Elem
-		}
-	}
-	return t
-}
-
 func resolveAliasAndPtrType(t *types.Type) *types.Type {
 	var prev *types.Type
 	for prev != t {
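
Among the openapi.go reverts above, generateDescription goes back to escaping the doc string by hand instead of letting `%#v` quote it (the reverted code also first unescapes user-written `\"` sequences, which this sketch omits). For typical input the two strategies emit the same literal:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	doc := "Line one.\n\t\"quoted\""

	// Hand-escaping, as in the reverted generator.
	manual := strings.ReplaceAll(doc, `"`, `\"`)
	manual = strings.ReplaceAll(manual, "\n", `\n`)
	manual = strings.ReplaceAll(manual, "\t", `\t`)
	fmt.Println(`Description: "` + manual + `",`)

	// %#v quotes the same string as a Go literal in a single step.
	fmt.Printf("Description: %#v,\n", doc)
}
```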
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go
index d7655f0d92..af30edc5ed 100644
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go
@@ -32,6 +32,14 @@ var (
 		"-",
 	)
 
+	// Blacklist of JSON names that should skip match evaluation
+	jsonNameBlacklist = sets.NewString(
+		// Empty name is used for inline struct field (e.g. metav1.TypeMeta)
+		"",
+		// Special case for object and list meta
+		"metadata",
+	)
+
 	// List of substrings that aren't allowed in Go name and JSON name
 	disallowedNameSubstrings = sets.NewString(
 		// Underscore is not allowed in either name
@@ -65,11 +73,12 @@ is also considered matched.
 
 	HTTPJSONSpec   httpjsonSpec   true
 
-NOTE: an empty JSON name is valid only for inlined structs or pointer to structs.
-It cannot be empty for anything else because capitalization must be set explicitly.
+NOTE: JSON names in jsonNameBlacklist are skipped during match evaluation
 
-NOTE: metav1.ListMeta and metav1.ObjectMeta by convention must have "metadata" as name.
-Other fields may have that JSON name if the field name matches.
+	                              true
+	podSpec                       true
+	podSpec        -              true
+	podSpec        metadata       true
 */
 type NamesMatch struct{}
 
@@ -100,7 +109,7 @@ func (n *NamesMatch) Validate(t *types.Type) ([]string, error) {
 				continue
 			}
 			jsonName := strings.Split(jsonTag, ",")[0]
-			if !nameIsOkay(m, jsonName) {
+			if !namesMatch(goName, jsonName) {
 				fields = append(fields, goName)
 			}
 		}
@@ -108,22 +117,6 @@ func (n *NamesMatch) Validate(t *types.Type) ([]string, error) {
 	return fields, nil
 }
 
-func nameIsOkay(member types.Member, jsonName string) bool {
-	if jsonName == "" {
-		return member.Type.Kind == types.Struct ||
-			member.Type.Kind == types.Pointer && member.Type.Elem.Kind == types.Struct
-	}
-
-	typeName := member.Type.String()
-	switch typeName {
-	case "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta",
-		"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta":
-		return jsonName == "metadata"
-	}
-
-	return namesMatch(member.Name, jsonName)
-}
-
 // namesMatch evaluates if goName and jsonName match the API rule
 // TODO: Use an off-the-shelf CamelCase solution instead of implementing this logic. The following existing
 //
@@ -136,6 +129,9 @@ func nameIsOkay(member types.Member, jsonName string) bool {
 //		 about why they don't satisfy our need. What we need can be a function that detects an acronym at the
 //		 beginning of a string.
 func namesMatch(goName, jsonName string) bool {
+	if jsonNameBlacklist.Has(jsonName) {
+		return true
+	}
 	if !isAllowedName(goName) || !isAllowedName(jsonName) {
 		return false
 	}
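
The names_match.go revert replaces the member-aware nameIsOkay check with a blacklist consulted inside namesMatch itself: the empty name and "metadata" always pass, regardless of the field's type. A simplified sketch of that flow (the real matcher also handles leading acronyms):

```go
package main

import (
	"fmt"
	"strings"
)

// skip lists JSON names that bypass matching, per the patch: "" for inlined
// structs and "metadata" for ObjectMeta/ListMeta by convention.
var skip = map[string]bool{"": true, "metadata": true}

func namesMatch(goName, jsonName string) bool {
	if skip[jsonName] {
		return true
	}
	// Simplified rule: the JSON name must equal the Go name with its first
	// letter lower-cased.
	return jsonName == strings.ToLower(goName[:1])+goName[1:]
}

func main() {
	fmt.Println(namesMatch("PodSpec", "podSpec"))     // true
	fmt.Println(namesMatch("TypeMeta", ""))           // true (inlined)
	fmt.Println(namesMatch("ObjectMeta", "metadata")) // true (blacklisted)
	fmt.Println(namesMatch("PodSpec", "pod_spec"))    // false
}
```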
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/handler/handler.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/handler/handler.go
index 5102e71251..5fc6297734 100644
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/handler/handler.go
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/handler/handler.go
@@ -26,10 +26,10 @@ import (
 
 	"github.com/NYTimes/gziphandler"
 	"github.com/emicklei/go-restful/v3"
+	"github.com/golang/protobuf/proto"
 	openapi_v2 "github.com/google/gnostic-models/openapiv2"
 	"github.com/google/uuid"
 	"github.com/munnerz/goautoneg"
-	"google.golang.org/protobuf/proto"
 
 	klog "k8s.io/klog/v2"
 	"k8s.io/kube-openapi/pkg/builder"
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
index 10f0b385fa..fc45634887 100644
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
@@ -29,10 +29,10 @@ import (
 	"sync"
 	"time"
 
+	"github.com/golang/protobuf/proto"
 	openapi_v3 "github.com/google/gnostic-models/openapiv3"
 	"github.com/google/uuid"
 	"github.com/munnerz/goautoneg"
-	"google.golang.org/protobuf/proto"
 
 	"k8s.io/klog/v2"
 	"k8s.io/kube-openapi/pkg/cached"
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/LICENSE b/hack/tools/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/LICENSE
deleted file mode 100644
index 2f9a31fadf..0000000000
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Alex Saskevich
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/patterns.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/patterns.go
deleted file mode 100644
index 6e02f2d002..0000000000
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/patterns.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package govalidator
-
-import "regexp"
-
-// Basic regular expressions for validating strings
-const (
-	CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
-	ISBN10     string = "^(?:[0-9]{9}X|[0-9]{10})$"
-	ISBN13     string = "^(?:[0-9]{13})$"
-	Hexcolor   string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
-	RGBcolor   string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
-	Base64     string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
-	SSN        string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
-	Int        string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
-)
-
-var (
-	rxCreditCard = regexp.MustCompile(CreditCard)
-	rxInt        = regexp.MustCompile(Int)
-	rxISBN10     = regexp.MustCompile(ISBN10)
-	rxISBN13     = regexp.MustCompile(ISBN13)
-	rxHexcolor   = regexp.MustCompile(Hexcolor)
-	rxRGBcolor   = regexp.MustCompile(RGBcolor)
-	rxBase64     = regexp.MustCompile(Base64)
-	rxSSN        = regexp.MustCompile(SSN)
-)
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/validator.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/validator.go
deleted file mode 100644
index 4d089508a2..0000000000
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/validator.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Package govalidator is package of validators and sanitizers for strings, structs and collections.
-package govalidator
-
-import (
-	"fmt"
-	"net"
-	"net/url"
-	"reflect"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-var (
-	notNumberRegexp     = regexp.MustCompile("[^0-9]+")
-	whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`)
-)
-
-// IsRequestURI check if the string rawurl, assuming
-// it was received in an HTTP request, is an
-// absolute URI or an absolute path.
-func IsRequestURI(rawurl string) bool {
-	_, err := url.ParseRequestURI(rawurl)
-	return err == nil
-}
-
-// IsHexcolor check if the string is a hexadecimal color.
-func IsHexcolor(str string) bool {
-	return rxHexcolor.MatchString(str)
-}
-
-// IsRGBcolor check if the string is a valid RGB color in form rgb(RRR, GGG, BBB).
-func IsRGBcolor(str string) bool {
-	return rxRGBcolor.MatchString(str)
-}
-
-// IsCreditCard check if the string is a credit card.
-func IsCreditCard(str string) bool {
-	sanitized := notNumberRegexp.ReplaceAllString(str, "")
-	if !rxCreditCard.MatchString(sanitized) {
-		return false
-	}
-	var sum int64
-	var digit string
-	var tmpNum int64
-	var shouldDouble bool
-	for i := len(sanitized) - 1; i >= 0; i-- {
-		digit = sanitized[i:(i + 1)]
-		tmpNum, _ = ToInt(digit)
-		if shouldDouble {
-			tmpNum *= 2
-			if tmpNum >= 10 {
-				sum += (tmpNum % 10) + 1
-			} else {
-				sum += tmpNum
-			}
-		} else {
-			sum += tmpNum
-		}
-		shouldDouble = !shouldDouble
-	}
-
-	return sum%10 == 0
-}
-
-// IsISBN10 check if the string is an ISBN version 10.
-func IsISBN10(str string) bool {
-	return IsISBN(str, 10)
-}
-
-// IsISBN13 check if the string is an ISBN version 13.
-func IsISBN13(str string) bool {
-	return IsISBN(str, 13)
-}
-
-// IsISBN check if the string is an ISBN (version 10 or 13).
-// If version value is not equal to 10 or 13, it will be check both variants.
-func IsISBN(str string, version int) bool {
-	sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
-	var checksum int32
-	var i int32
-	if version == 10 {
-		if !rxISBN10.MatchString(sanitized) {
-			return false
-		}
-		for i = 0; i < 9; i++ {
-			checksum += (i + 1) * int32(sanitized[i]-'0')
-		}
-		if sanitized[9] == 'X' {
-			checksum += 10 * 10
-		} else {
-			checksum += 10 * int32(sanitized[9]-'0')
-		}
-		if checksum%11 == 0 {
-			return true
-		}
-		return false
-	} else if version == 13 {
-		if !rxISBN13.MatchString(sanitized) {
-			return false
-		}
-		factor := []int32{1, 3}
-		for i = 0; i < 12; i++ {
-			checksum += factor[i%2] * int32(sanitized[i]-'0')
-		}
-		return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
-	}
-	return IsISBN(str, 10) || IsISBN(str, 13)
-}
-
-// IsBase64 check if a string is base64 encoded.
-func IsBase64(str string) bool {
-	return rxBase64.MatchString(str)
-}
-
-// IsIPv6 check if the string is an IP version 6.
-func IsIPv6(str string) bool {
-	ip := net.ParseIP(str)
-	return ip != nil && strings.Contains(str, ":")
-}
-
-// IsMAC check if a string is valid MAC address.
-// Possible MAC formats:
-// 01:23:45:67:89:ab
-// 01:23:45:67:89:ab:cd:ef
-// 01-23-45-67-89-ab
-// 01-23-45-67-89-ab-cd-ef
-// 0123.4567.89ab
-// 0123.4567.89ab.cdef
-func IsMAC(str string) bool {
-	_, err := net.ParseMAC(str)
-	return err == nil
-}
-
-// IsSSN will validate the given string as a U.S. Social Security Number
-func IsSSN(str string) bool {
-	if str == "" || len(str) != 11 {
-		return false
-	}
-	return rxSSN.MatchString(str)
-}
-
-// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer.
-func ToInt(value interface{}) (res int64, err error) {
-	val := reflect.ValueOf(value)
-
-	switch value.(type) {
-	case int, int8, int16, int32, int64:
-		res = val.Int()
-	case uint, uint8, uint16, uint32, uint64:
-		res = int64(val.Uint())
-	case string:
-		if IsInt(val.String()) {
-			res, err = strconv.ParseInt(val.String(), 0, 64)
-			if err != nil {
-				res = 0
-			}
-		} else {
-			err = fmt.Errorf("math: square root of negative number %g", value)
-			res = 0
-		}
-	default:
-		err = fmt.Errorf("math: square root of negative number %g", value)
-		res = 0
-	}
-
-	return
-}
-
-// IsInt check if the string is an integer. Empty string is valid.
-func IsInt(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxInt.MatchString(str)
-}
-
-// IsNull check if the string is null.
-func IsNull(str string) bool {
-	return len(str) == 0
-}
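
The deleted validator.go above carried its own Luhn implementation inside IsCreditCard, applied after the regexp check. For reference, a compact sketch of that checksum: double every second digit from the right, fold results of 10 or more back to a single digit, and require the sum to be divisible by 10.

```go
package main

import "fmt"

// luhnOK mirrors the checksum loop from the removed IsCreditCard helper.
func luhnOK(digits string) bool {
	sum, double := 0, false
	for i := len(digits) - 1; i >= 0; i-- {
		d := int(digits[i] - '0')
		if double {
			d *= 2
			if d >= 10 {
				d = d%10 + 1
			}
		}
		sum += d
		double = !double
	}
	return sum%10 == 0
}

func main() {
	fmt.Println(luhnOK("4111111111111111")) // true: a well-known test PAN
	fmt.Println(luhnOK("4111111111111112")) // false
}
```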
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
index 1b758ab25a..5789e67ab7 100644
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
@@ -22,7 +22,7 @@ import (
 	"strings"
 
 	openapi_v2 "github.com/google/gnostic-models/openapiv2"
-	yaml "sigs.k8s.io/yaml/goyaml.v2"
+	"gopkg.in/yaml.v2"
 )
 
 func newSchemaError(path *Path, format string, a ...interface{}) error {
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go
index 97b2f989e9..e85b0f1b46 100644
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go
@@ -22,9 +22,9 @@ import (
 	"regexp"
 	"strings"
 
-	netutils "k8s.io/utils/net"
+	"github.com/asaskevich/govalidator"
 
-	"k8s.io/kube-openapi/pkg/internal/third_party/govalidator"
+	netutils "k8s.io/utils/net"
 )
 
 const (
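
The strfmt change above swaps the deleted vendored fork for the upstream github.com/asaskevich/govalidator module; the call sites keep the same helper names. A quick usage sketch, assuming that module is on the module path and keeps these exported names:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// The strfmt package now resolves these helpers from the upstream
	// module instead of the deleted vendored fork.
	fmt.Println(govalidator.IsBase64("aGVsbG8="))      // true
	fmt.Println(govalidator.IsISBN13("9780306406157")) // true
}
```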
diff --git a/hack/tools/vendor/modules.txt b/hack/tools/vendor/modules.txt
index 5ff371cfd8..7a0a9d0d91 100644
--- a/hack/tools/vendor/modules.txt
+++ b/hack/tools/vendor/modules.txt
@@ -1,3 +1,6 @@
+# github.com/Masterminds/semver/v3 v3.4.0
+## explicit; go 1.21
+github.com/Masterminds/semver/v3
 # github.com/NYTimes/gziphandler v1.1.1
 ## explicit; go 1.11
 github.com/NYTimes/gziphandler
@@ -146,7 +149,7 @@ github.com/google/shlex
 # github.com/google/uuid v1.6.0
 ## explicit
 github.com/google/uuid
-# github.com/gophercloud/gophercloud/v2 v2.7.0
+# github.com/gophercloud/gophercloud/v2 v2.9.0
 ## explicit; go 1.22
 github.com/gophercloud/gophercloud/v2
 github.com/gophercloud/gophercloud/v2/openstack
@@ -201,7 +204,7 @@ github.com/gophercloud/utils/v2/openstack/clientconfig
 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
 github.com/grpc-ecosystem/grpc-gateway/v2/runtime
 github.com/grpc-ecosystem/grpc-gateway/v2/utilities
-# github.com/hashicorp/go-version v1.7.0
+# github.com/hashicorp/go-version v1.8.0
 ## explicit
 github.com/hashicorp/go-version
 # github.com/imdario/mergo v0.3.15
@@ -260,11 +263,12 @@ github.com/monochromegane/go-gitignore
 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 ## explicit
 github.com/munnerz/goautoneg
-# github.com/onsi/ginkgo/v2 v2.23.4
+# github.com/onsi/ginkgo/v2 v2.27.3
 ## explicit; go 1.23.0
 github.com/onsi/ginkgo/v2/config
 github.com/onsi/ginkgo/v2/formatter
 github.com/onsi/ginkgo/v2/ginkgo
+github.com/onsi/ginkgo/v2/ginkgo/automaxprocs
 github.com/onsi/ginkgo/v2/ginkgo/build
 github.com/onsi/ginkgo/v2/ginkgo/command
 github.com/onsi/ginkgo/v2/ginkgo/generators
@@ -276,9 +280,10 @@ github.com/onsi/ginkgo/v2/ginkgo/unfocus
 github.com/onsi/ginkgo/v2/ginkgo/watch
 github.com/onsi/ginkgo/v2/internal/interrupt_handler
 github.com/onsi/ginkgo/v2/internal/parallel_support
+github.com/onsi/ginkgo/v2/internal/reporters
 github.com/onsi/ginkgo/v2/reporters
 github.com/onsi/ginkgo/v2/types
-# github.com/onsi/gomega v1.37.0
+# github.com/onsi/gomega v1.38.2
 ## explicit; go 1.23.0
 github.com/onsi/gomega
 github.com/onsi/gomega/format
@@ -294,8 +299,8 @@ github.com/onsi/gomega/types
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
-# github.com/prometheus/client_golang v1.22.0
-## explicit; go 1.22
+# github.com/prometheus/client_golang v1.23.2
+## explicit; go 1.23.0
 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
 github.com/prometheus/client_golang/prometheus
@@ -306,15 +311,15 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal
 github.com/prometheus/client_golang/prometheus/testutil
 github.com/prometheus/client_golang/prometheus/testutil/promlint
 github.com/prometheus/client_golang/prometheus/testutil/promlint/validations
-# github.com/prometheus/client_model v0.6.1
-## explicit; go 1.19
+# github.com/prometheus/client_model v0.6.2
+## explicit; go 1.22.0
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.62.0
-## explicit; go 1.21
+# github.com/prometheus/common v0.66.1
+## explicit; go 1.23.0
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.15.1
-## explicit; go 1.20
+# github.com/prometheus/procfs v0.16.1
+## explicit; go 1.23.0
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
@@ -335,7 +340,7 @@ github.com/spf13/afero/mem
 # github.com/spf13/cobra v1.8.1
 ## explicit; go 1.15
 github.com/spf13/cobra
-# github.com/spf13/pflag v1.0.6
+# github.com/spf13/pflag v1.0.10
 ## explicit; go 1.12
 github.com/spf13/pflag
 # github.com/stoewer/go-strcase v1.3.0
@@ -401,14 +406,8 @@ go.opentelemetry.io/proto/otlp/collector/trace/v1
 go.opentelemetry.io/proto/otlp/common/v1
 go.opentelemetry.io/proto/otlp/resource/v1
 go.opentelemetry.io/proto/otlp/trace/v1
-# go.uber.org/automaxprocs v1.6.0
-## explicit; go 1.20
-go.uber.org/automaxprocs
-go.uber.org/automaxprocs/internal/cgroups
-go.uber.org/automaxprocs/internal/runtime
-go.uber.org/automaxprocs/maxprocs
-# go.uber.org/mock v0.5.2
-## explicit; go 1.23
+# go.uber.org/mock v0.6.0
+## explicit; go 1.23.0
 go.uber.org/mock/gomock
 go.uber.org/mock/mockgen
 go.uber.org/mock/mockgen/model
@@ -426,18 +425,24 @@ go.uber.org/zap/internal/exit
 go.uber.org/zap/internal/pool
 go.uber.org/zap/internal/stacktrace
 go.uber.org/zap/zapcore
+# go.yaml.in/yaml/v2 v2.4.2
+## explicit; go 1.15
+go.yaml.in/yaml/v2
+# go.yaml.in/yaml/v3 v3.0.4
+## explicit; go 1.16
+go.yaml.in/yaml/v3
 # golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
 ## explicit; go 1.20
 golang.org/x/exp/constraints
 golang.org/x/exp/maps
 golang.org/x/exp/slices
-# golang.org/x/mod v0.25.0
+# golang.org/x/mod v0.27.0
 ## explicit; go 1.23.0
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.40.0
+# golang.org/x/net v0.43.0
 ## explicit; go 1.23.0
 golang.org/x/net/html
 golang.org/x/net/html/atom
@@ -450,25 +455,25 @@ golang.org/x/net/internal/httpcommon
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
 golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.24.0
-## explicit; go 1.18
+# golang.org/x/oauth2 v0.30.0
+## explicit; go 1.23.0
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.15.0
+# golang.org/x/sync v0.16.0
 ## explicit; go 1.23.0
 golang.org/x/sync/errgroup
 golang.org/x/sync/singleflight
-# golang.org/x/sys v0.33.0
+# golang.org/x/sys v0.35.0
 ## explicit; go 1.23.0
 golang.org/x/sys/execabs
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# golang.org/x/term v0.32.0
+# golang.org/x/term v0.34.0
 ## explicit; go 1.23.0
 golang.org/x/term
-# golang.org/x/text v0.26.0
+# golang.org/x/text v0.28.0
 ## explicit; go 1.23.0
 golang.org/x/text/cases
 golang.org/x/text/encoding
@@ -503,10 +508,11 @@ golang.org/x/text/width
 # golang.org/x/time v0.5.0
 ## explicit; go 1.18
 golang.org/x/time/rate
-# golang.org/x/tools v0.33.0
+# golang.org/x/tools v0.36.0
 ## explicit; go 1.23.0
 golang.org/x/tools/cover
 golang.org/x/tools/go/ast/astutil
+golang.org/x/tools/go/ast/edge
 golang.org/x/tools/go/ast/inspector
 golang.org/x/tools/go/gcexportdata
 golang.org/x/tools/go/packages
@@ -514,7 +520,6 @@ golang.org/x/tools/go/types/objectpath
 golang.org/x/tools/go/types/typeutil
 golang.org/x/tools/imports
 golang.org/x/tools/internal/aliases
-golang.org/x/tools/internal/astutil/edge
 golang.org/x/tools/internal/event
 golang.org/x/tools/internal/event/core
 golang.org/x/tools/internal/event/keys
@@ -530,6 +535,8 @@ golang.org/x/tools/internal/stdlib
 golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
 golang.org/x/tools/internal/versions
+# golang.org/x/tools/go/expect v0.1.0-deprecated
+## explicit; go 1.23.0
 # golang.org/x/tools/go/vcs v0.1.0-deprecated
 ## explicit; go 1.19
 golang.org/x/tools/go/vcs
@@ -599,8 +606,8 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.36.5
-## explicit; go 1.21
+# google.golang.org/protobuf v1.36.8
+## explicit; go 1.23
 google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
@@ -656,7 +663,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.31.9
+# k8s.io/api v0.31.14
 ## explicit; go 1.22.0
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -715,12 +722,12 @@ k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
 k8s.io/api/storagemigration/v1alpha1
-# k8s.io/apiextensions-apiserver v0.31.9
+# k8s.io/apiextensions-apiserver v0.31.14
 ## explicit; go 1.22.0
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
-# k8s.io/apimachinery v0.31.9
+# k8s.io/apimachinery v0.31.14
 ## explicit; go 1.22.0
 k8s.io/apimachinery/pkg/api/apitesting
 k8s.io/apimachinery/pkg/api/apitesting/fuzzer
@@ -784,7 +791,7 @@ k8s.io/apimachinery/pkg/version
 k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.31.9
+# k8s.io/apiserver v0.31.14
 ## explicit; go 1.22.0
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/apis/apiserver
@@ -845,7 +852,7 @@ k8s.io/apiserver/pkg/warning
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics
-# k8s.io/client-go v0.31.9
+# k8s.io/client-go v0.31.14
 ## explicit; go 1.22.0
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1
@@ -1115,7 +1122,7 @@ k8s.io/client-go/util/workqueue
 ## explicit; go 1.22.0
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/token/util
-# k8s.io/code-generator v0.31.9
+# k8s.io/code-generator v0.31.14
 ## explicit; go 1.22.0
 k8s.io/code-generator
 k8s.io/code-generator/cmd/applyconfiguration-gen
@@ -1151,7 +1158,7 @@ k8s.io/code-generator/cmd/register-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
 k8s.io/code-generator/third_party/forked/golang/reflect
-# k8s.io/component-base v0.31.9
+# k8s.io/component-base v0.31.14
 ## explicit; go 1.22.0
 k8s.io/component-base/cli/flag
 k8s.io/component-base/featuregate
@@ -1194,8 +1201,8 @@ k8s.io/klog/v2/internal/severity
 k8s.io/klog/v2/internal/sloghandler
 k8s.io/klog/v2/internal/verbosity
 k8s.io/klog/v2/textlogger
-# k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7
-## explicit; go 1.21
+# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340
+## explicit; go 1.20
 k8s.io/kube-openapi/cmd/openapi-gen
 k8s.io/kube-openapi/cmd/openapi-gen/args
 k8s.io/kube-openapi/pkg/builder
@@ -1210,7 +1217,6 @@ k8s.io/kube-openapi/pkg/handler
 k8s.io/kube-openapi/pkg/handler3
 k8s.io/kube-openapi/pkg/internal
 k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json
-k8s.io/kube-openapi/pkg/internal/third_party/govalidator
 k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/schemamutation
 k8s.io/kube-openapi/pkg/spec3
@@ -1241,7 +1247,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
-# sigs.k8s.io/cluster-api v1.9.8
+# sigs.k8s.io/cluster-api v1.9.11
 ## explicit; go 1.22.0
 sigs.k8s.io/cluster-api/api/v1beta1
 sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1
@@ -1369,7 +1375,7 @@ sigs.k8s.io/json/internal/golang/encoding/json
 # sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d
 ## explicit; go 1.16
 sigs.k8s.io/kubebuilder/docs/book/utils/plugin
-# sigs.k8s.io/kustomize/api v0.19.0
+# sigs.k8s.io/kustomize/api v0.18.0
 ## explicit; go 1.22.7
 sigs.k8s.io/kustomize/api/filters/annotations
 sigs.k8s.io/kustomize/api/filters/fieldspec
@@ -1419,7 +1425,7 @@ sigs.k8s.io/kustomize/api/provider
 sigs.k8s.io/kustomize/api/resmap
 sigs.k8s.io/kustomize/api/resource
 sigs.k8s.io/kustomize/api/types
-# sigs.k8s.io/kustomize/cmd/config v0.19.0
+# sigs.k8s.io/kustomize/cmd/config v0.15.0
 ## explicit; go 1.22.7
 sigs.k8s.io/kustomize/cmd/config/completion
 sigs.k8s.io/kustomize/cmd/config/configcobra
@@ -1430,7 +1436,7 @@ sigs.k8s.io/kustomize/cmd/config/internal/generateddocs/api
 sigs.k8s.io/kustomize/cmd/config/internal/generateddocs/commands
 sigs.k8s.io/kustomize/cmd/config/internal/generateddocs/tutorials
 sigs.k8s.io/kustomize/cmd/config/runner
-# sigs.k8s.io/kustomize/kustomize/v5 v5.6.0
+# sigs.k8s.io/kustomize/kustomize/v5 v5.5.0
 ## explicit; go 1.22.7
 sigs.k8s.io/kustomize/kustomize/v5
 sigs.k8s.io/kustomize/kustomize/v5/commands
@@ -1449,7 +1455,7 @@ sigs.k8s.io/kustomize/kustomize/v5/commands/openapi
 sigs.k8s.io/kustomize/kustomize/v5/commands/openapi/fetch
 sigs.k8s.io/kustomize/kustomize/v5/commands/openapi/info
 sigs.k8s.io/kustomize/kustomize/v5/commands/version
-# sigs.k8s.io/kustomize/kyaml v0.19.0
+# sigs.k8s.io/kustomize/kyaml v0.18.1
 ## explicit; go 1.22.7
 sigs.k8s.io/kustomize/kyaml/comments
 sigs.k8s.io/kustomize/kyaml/copyutil
@@ -1492,8 +1498,8 @@ sigs.k8s.io/structured-merge-diff/v4/merge
 sigs.k8s.io/structured-merge-diff/v4/schema
 sigs.k8s.io/structured-merge-diff/v4/typed
 sigs.k8s.io/structured-merge-diff/v4/value
-# sigs.k8s.io/yaml v1.4.0
-## explicit; go 1.12
+# sigs.k8s.io/yaml v1.6.0
+## explicit; go 1.22
 sigs.k8s.io/yaml
 sigs.k8s.io/yaml/goyaml.v2
 sigs.k8s.io/yaml/goyaml.v3
diff --git a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/Dockerfile b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/Dockerfile
index ad6c6b4b9b..651b3ca31e 100644
--- a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/Dockerfile
+++ b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/Dockerfile
@@ -14,7 +14,7 @@
 
 # Build the manager binary
 ARG GO_VERSION
-FROM golang:${GO_VERSION} AS builder
+FROM golang:${GO_VERSION:-1.24.11} AS builder
 WORKDIR /workspace
 
 # Run this with docker build --build_arg goproxy=$(go env GOPROXY) to override the goproxy
@@ -28,7 +28,7 @@ COPY go.sum go.sum
 # Cache deps before building and copying source so that we don't need to re-download as much
 # and so that source changes don't invalidate our downloaded layer
 RUN --mount=type=cache,target=/go/pkg/mod \
-    go mod download
+  go mod download
 
 # Copy the sources
 COPY ./ ./
@@ -40,10 +40,10 @@ ARG ldflags
 
 # Do not force rebuild of up-to-date packages (do not use -a) and use the compiler cache folder
 RUN --mount=type=cache,target=/root/.cache/go-build \
-    --mount=type=cache,target=/go/pkg/mod \
-    CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \
-    go build -ldflags "${ldflags} -extldflags '-static'" \
-    -o manager ${package}
+  --mount=type=cache,target=/go/pkg/mod \
+  CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \
+  go build -ldflags "${ldflags} -extldflags '-static'" \
+  -o manager ${package}
 
 # Production image
 FROM gcr.io/distroless/static:nonroot
diff --git a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/Makefile b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/Makefile
index 48fcc2402b..39291b665c 100644
--- a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/Makefile
+++ b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/Makefile
@@ -27,7 +27,7 @@ unexport GOPATH
 TRACE ?= 0
 
 # Go
-GO_VERSION ?= 1.23.10
+GO_VERSION ?= 1.24.11
 
 # Directories.
 ARTIFACTS ?= $(REPO_ROOT)/_artifacts
@@ -69,6 +69,11 @@ GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint
 GOTESTSUM := $(TOOLS_BIN_DIR)/gotestsum
 KUSTOMIZE := $(TOOLS_BIN_DIR)/kustomize
 MOCKGEN := $(TOOLS_BIN_DIR)/mockgen
+OPENAPI_GEN := $(TOOLS_BIN_DIR)/openapi-gen
+APPLYCONFIGURATION_GEN := $(TOOLS_BIN_DIR)/applyconfiguration-gen
+CLIENT_GEN := $(TOOLS_BIN_DIR)/client-gen
+LISTER_GEN := $(TOOLS_BIN_DIR)/lister-gen
+INFORMER_GEN := $(TOOLS_BIN_DIR)/informer-gen
 RELEASE_NOTES := $(TOOLS_BIN_DIR)/release-notes
 SETUP_ENVTEST := $(TOOLS_BIN_DIR)/setup-envtest
 GEN_CRD_API_REFERENCE_DOCS := $(TOOLS_BIN_DIR)/gen-crd-api-reference-docs
@@ -317,8 +322,66 @@ generate-controller-gen: $(CONTROLLER_GEN)
 		object:headerFile=./hack/boilerplate/boilerplate.generatego.txt
 
 .PHONY: generate-codegen
-generate-codegen: generate-controller-gen
-	./hack/update-codegen.sh
+generate-codegen: generate-controller-gen $(OPENAPI_GEN) $(APPLYCONFIGURATION_GEN) $(CLIENT_GEN) $(LISTER_GEN) $(INFORMER_GEN)
+	@echo "** Generating OpenAPI definitions **"
+	# The package list includes:
+	# - CAPO's own API packages (v1alpha1, v1alpha7, v1beta1) that have // +k8s:openapi-gen= markers
+	# - Dependency packages from CAPI and k8s.io that are referenced by CAPO's APIs
+	# - Base k8s.io/apimachinery packages
+	$(OPENAPI_GEN) \
+		--go-header-file=./hack/boilerplate.go.txt \
+		--output-file=zz_generated.openapi.go \
+		--output-dir=./cmd/models-schema \
+		--output-pkg=main \
+		--report-filename=./api_violations.report \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1 \
+		sigs.k8s.io/cluster-api/api/v1beta1 \
+		k8s.io/api/core/v1 \
+		k8s.io/apimachinery/pkg/apis/meta/v1 \
+		k8s.io/apimachinery/pkg/runtime \
+		k8s.io/apimachinery/pkg/version
+	@echo "** Generating openapi.json **"
+	go run ./cmd/models-schema | jq > ./openapi.json
+	@echo "** Generating applyconfiguration code **"
+	$(APPLYCONFIGURATION_GEN) \
+		--go-header-file=./hack/boilerplate.go.txt \
+		--output-dir=./pkg/generated/applyconfiguration \
+		--output-pkg=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/applyconfiguration \
+		--openapi-schema=./openapi.json \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1
+	@echo "** Generating clientset code **"
+	$(CLIENT_GEN) \
+		--go-header-file=./hack/boilerplate.go.txt \
+		--output-dir=./pkg/generated/clientset \
+		--output-pkg=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/clientset \
+		--clientset-name=clientset \
+		--input-base=sigs.k8s.io/cluster-api-provider-openstack \
+		--apply-configuration-package=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/applyconfiguration \
+		--input=api/v1alpha1 \
+		--input=api/v1alpha7 \
+		--input=api/v1beta1
+	@echo "** Generating lister code **"
+	$(LISTER_GEN) \
+		--go-header-file=./hack/boilerplate.go.txt \
+		--output-dir=./pkg/generated/listers \
+		--output-pkg=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/listers \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1
+	@echo "** Generating informer code **"
+	$(INFORMER_GEN) \
+		--go-header-file=./hack/boilerplate.go.txt \
+		--output-dir=./pkg/generated/informers \
+		--output-pkg=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/informers \
+		--versioned-clientset-package=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/clientset/clientset \
+		--listers-package=sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/listers \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7 \
+		sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1
 
 .PHONY: generate-conversion-gen
 generate-conversion-gen: $(CONVERSION_GEN)
@@ -517,6 +580,8 @@ templates/cluster-template-%.yaml: kustomize/v1beta1/% $(KUSTOMIZE) FORCE
 .PHONY: release-templates
 release-templates: $(RELEASE_DIR) templates ## Generate release templates
 	cp templates/cluster-template*.yaml $(RELEASE_DIR)/
+	cp templates/clusterclass*.yaml $(RELEASE_DIR)/
+	cp templates/image-template*.yaml $(RELEASE_DIR)/
 
 IMAGE_PATCH_DIR := $(ARTIFACTS)/image-patch
 
@@ -587,7 +652,7 @@ clean-release-git: ## Restores the git files usually modified during a release
 	git restore ./*manager_image_patch.yaml ./*manager_pull_policy.yaml
 
 .PHONY: verify
-verify: verify-boilerplate verify-modules verify-gen verify-govulncheck
+verify: verify-boilerplate verify-modules verify-gen
 
 .PHONY: verify-boilerplate
 verify-boilerplate:
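
The expanded `generate-codegen` target wires openapi-gen, applyconfiguration-gen, client-gen, lister-gen, and informer-gen over CAPO's API packages. Those generators key off marker comments in the package sources; below is a rough, hypothetical sketch of that convention (the package layout and type are invented for illustration, not CAPO's real API):

```go
// doc.go-style sketch of the marker comments the code generators consume.
// HypotheticalMachineSpec is illustrative only; CAPO's real types live in
// api/v1alpha1, api/v1alpha7, and api/v1beta1.

// +k8s:openapi-gen=true
package v1beta1

// HypotheticalMachineSpec shows the doc-comment layout that openapi-gen
// turns into schema descriptions in zz_generated.openapi.go.
type HypotheticalMachineSpec struct {
	// Flavor is the OpenStack flavor to boot the machine with.
	Flavor string `json:"flavor"`

	// DNSNameservers lists resolvers for the machine's subnet.
	// +optional
	DNSNameservers []string `json:"dnsNameservers,omitempty"`
}
```

Per the target's ordering above, openapi-gen emits the schema into `zz_generated.openapi.go`, the `models-schema` helper serializes it to `openapi.json`, and applyconfiguration-gen then consumes that schema before the clientset, lister, and informer generators run.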
diff --git a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/OWNERS b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/OWNERS
index ea64e2e7b8..af00278ed6 100644
--- a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/OWNERS
+++ b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/OWNERS
@@ -21,3 +21,5 @@ emeritus_approvers:
   - chrischdi
   - tobiasgiese
   - seanschneeweiss
+  - jichenjc
+  - mdbooth
diff --git a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/OWNERS_ALIASES b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/OWNERS_ALIASES
index d2efce1b97..d5c4322787 100644
--- a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/OWNERS_ALIASES
+++ b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/OWNERS_ALIASES
@@ -21,7 +21,6 @@ aliases:
   cluster-api-openstack-maintainers:
     - emilienm
     - lentzi90
-    - mdbooth
   cluster-api-openstack-reviewers:
-  cluster-api-openstack-emeritus-maintainers:
-    - jichenjc
+    - bnallapeta
+    - smoshiur1237
diff --git a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/cloudbuild.yaml b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/cloudbuild.yaml
index 49ec67656a..23783a278d 100644
--- a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/cloudbuild.yaml
+++ b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/cloudbuild.yaml
@@ -4,15 +4,19 @@ options:
   substitution_option: ALLOW_LOOSE
   machineType: 'N1_HIGHCPU_8'
 steps:
-  - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20241229-5dc092c636'
-    entrypoint: make
-    env:
-    - DOCKER_CLI_EXPERIMENTAL=enabled
-    - TAG=$_GIT_TAG
-    - PULL_BASE_REF=$_PULL_BASE_REF
-    - DOCKER_BUILDKIT=1
-    args:
-    - release-staging
+# To check whether the image can handle the build, try it locally like this:
+# docker run --rm -it -v $(pwd):/workspace gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:${TAG}
+# make clean # make sure we have something to build
+# make staging-manifests
+- name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20250116-2a05ea7e3d'
+  entrypoint: make
+  env:
+  - DOCKER_CLI_EXPERIMENTAL=enabled
+  - TAG=$_GIT_TAG
+  - PULL_BASE_REF=$_PULL_BASE_REF
+  - DOCKER_BUILDKIT=1
+  args:
+  - release-staging
 substitutions:
   # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
   # can be used as a substitution
diff --git a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/netlify.toml b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/netlify.toml
index b7ab61100a..a915db07ec 100644
--- a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/netlify.toml
+++ b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/netlify.toml
@@ -4,7 +4,7 @@ command = "make -C docs/book build"
 publish = "docs/book/book"
 
 [build.environment]
-GO_VERSION = "1.23.10"
+GO_VERSION = "1.24.11"
 
 # Standard Netlify redirects
 [[redirects]]
diff --git a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking/network.go b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking/network.go
index 2dbafc3eb4..4483c37829 100644
--- a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking/network.go
+++ b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking/network.go
@@ -25,6 +25,7 @@ import (
 	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/external"
 	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks"
 	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets"
+	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/utils/ptr"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1"
@@ -201,6 +202,10 @@ func (s *Service) ReconcileSubnet(openStackCluster *infrav1.OpenStackCluster, cl
 	} else if len(subnetList) == 1 {
 		subnet = &subnetList[0]
 		s.scope.Logger().V(6).Info("Reusing existing subnet", "name", subnet.Name, "id", subnet.ID)
+
+		if err := s.updateSubnetDNSNameservers(openStackCluster, subnet); err != nil {
+			return err
+		}
 	}
 
 	openStackCluster.Status.Network.Subnets = []infrav1.Subnet{
@@ -248,6 +253,39 @@ func (s *Service) createSubnet(openStackCluster *infrav1.OpenStackCluster, clust
 	return subnet, nil
 }
 
+// updateSubnetDNSNameservers updates the DNS nameservers for an existing subnet if they differ from the desired configuration.
+func (s *Service) updateSubnetDNSNameservers(openStackCluster *infrav1.OpenStackCluster, subnet *subnets.Subnet) error {
+	// Picking the first managed subnet since we only support one for now
+	desiredNameservers := openStackCluster.Spec.ManagedSubnets[0].DNSNameservers
+	currentNameservers := subnet.DNSNameservers
+
+	var needsUpdate bool
+	if len(desiredNameservers) != len(currentNameservers) {
+		needsUpdate = true
+	} else {
+		needsUpdate = !equality.Semantic.DeepEqual(currentNameservers, desiredNameservers)
+	}
+
+	if needsUpdate {
+		s.scope.Logger().Info("Updating subnet DNS nameservers", "id", subnet.ID, "from", currentNameservers, "to", desiredNameservers)
+
+		updateOpts := subnets.UpdateOpts{
+			DNSNameservers: &desiredNameservers,
+		}
+
+		updatedSubnet, err := s.client.UpdateSubnet(subnet.ID, updateOpts)
+		if err != nil {
+			record.Warnf(openStackCluster, "FailedUpdateSubnet", "Failed to update DNS nameservers for subnet %s: %v", subnet.ID, err)
+			return err
+		}
+
+		*subnet = *updatedSubnet
+		record.Eventf(openStackCluster, "SuccessfulUpdateSubnet", "Updated DNS nameservers for subnet %s", subnet.ID)
+	}
+
+	return nil
+}
+
 func (s *Service) getNetworkByName(networkName string) (networks.Network, error) {
 	opts := networks.ListOpts{
 		Name: networkName,
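
The new `updateSubnetDNSNameservers` only issues an OpenStack update when the observed nameservers drift from the spec. Note the explicit length comparison is just a fast path: `equality.Semantic.DeepEqual` already reports slices of different lengths as unequal. A minimal standalone sketch of the comparison semantics, using `reflect.DeepEqual`, which behaves the same as the semantic check for plain `[]string` values:

```go
package main

import (
	"fmt"
	"reflect"
)

// nameserversNeedUpdate mirrors the drift check in ReconcileSubnet: a deep
// comparison alone suffices, since differing lengths already compare unequal.
func nameserversNeedUpdate(current, desired []string) bool {
	return !reflect.DeepEqual(current, desired)
}

func main() {
	fmt.Println(nameserversNeedUpdate([]string{"8.8.8.8"}, []string{"8.8.8.8", "1.1.1.1"})) // true
	fmt.Println(nameserversNeedUpdate([]string{"8.8.8.8"}, []string{"8.8.8.8"}))            // false

	// Caveat: DeepEqual distinguishes nil from an empty slice, so clearing
	// dnsNameservers to [] in the spec still counts as drift against nil.
	fmt.Println(nameserversNeedUpdate(nil, []string{})) // true
}
```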
diff --git a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking/securitygroups_rules.go b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking/securitygroups_rules.go
index d74ff42185..110a89d441 100644
--- a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking/securitygroups_rules.go
+++ b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking/securitygroups_rules.go
@@ -258,20 +258,19 @@ func getSGWorkerAllowAll(remoteGroupIDSelf, secControlPlaneGroupID string) []res
 // Permit ports that defined in openStackCluster.Spec.APIServerLoadBalancer.AdditionalPorts.
 func getSGControlPlaneAdditionalPorts(ports []int) []resolvedSecurityGroupRuleSpec {
 	controlPlaneRules := []resolvedSecurityGroupRuleSpec{}
-
-	r := []resolvedSecurityGroupRuleSpec{
-		{
-			Description: "Additional ports",
-			Direction:   "ingress",
-			EtherType:   "IPv4",
-			Protocol:    "tcp",
-		},
-	}
+	// Preallocate one rule per additional port, filled in place below
+	r := make([]resolvedSecurityGroupRuleSpec, len(ports))
 	for i, p := range ports {
-		r[i].PortRangeMin = p
-		r[i].PortRangeMax = p
-		controlPlaneRules = append(controlPlaneRules, r...)
+		r[i] = resolvedSecurityGroupRuleSpec{
+			Description:  "Additional port",
+			Direction:    "ingress",
+			EtherType:    "IPv4",
+			Protocol:     "tcp",
+			PortRangeMin: p,
+			PortRangeMax: p,
+		}
 	}
+	controlPlaneRules = append(controlPlaneRules, r...)
 	return controlPlaneRules
 }
 
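This rewrite also fixes a latent panic: the previous loop indexed `r[i]` into a one-element slice, so more than one entry in `APIServerLoadBalancer.AdditionalPorts` would index out of range, and the whole slice was re-appended on every iteration. A reduced sketch of the fixed shape (the `rule` struct here is a stand-in for `resolvedSecurityGroupRuleSpec`, trimmed to the relevant fields):

```go
package main

import "fmt"

// rule is a trimmed stand-in for resolvedSecurityGroupRuleSpec.
type rule struct {
	Description  string
	PortRangeMin int
	PortRangeMax int
}

// additionalPortRules follows the fixed version: preallocate one rule per
// port and fill it in place. The old code's r[i] indexed a slice of length
// one and panicked as soon as i reached 1.
func additionalPortRules(ports []int) []rule {
	rules := make([]rule, len(ports))
	for i, p := range ports {
		rules[i] = rule{
			Description:  "Additional port",
			PortRangeMin: p,
			PortRangeMax: p,
		}
	}
	return rules
}

func main() {
	for _, r := range additionalPortRules([]int{8443, 9345}) {
		fmt.Printf("%s %d-%d\n", r.Description, r.PortRangeMin, r.PortRangeMax)
	}
}
```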
diff --git a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/webhooks/openstackcluster_webhook.go b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/webhooks/openstackcluster_webhook.go
index 19a2571b2a..a237572a8e 100644
--- a/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/webhooks/openstackcluster_webhook.go
+++ b/hack/tools/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/webhooks/openstackcluster_webhook.go
@@ -127,7 +127,7 @@ func allowSubnetFilterToIDTransition(oldObj, newObj *infrav1.OpenStackCluster) b
 }
 
 // ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type.
-func (*openStackClusterWebhook) ValidateUpdate(_ context.Context, oldObjRaw, newObjRaw runtime.Object) (admission.Warnings, error) {
+func (*openStackClusterWebhook) ValidateUpdate(_ context.Context, oldObjRaw, newObjRaw runtime.Object) (admission.Warnings, error) { //nolint:gocyclo,cyclop
 	var allErrs field.ErrorList
 	oldObj, err := castToOpenStackCluster(oldObjRaw)
 	if err != nil {
@@ -193,6 +193,40 @@ func (*openStackClusterWebhook) ValidateUpdate(_ context.Context, oldObjRaw, new
 		newObj.Spec.ManagedSecurityGroups.AllowAllInClusterTraffic = false
 	}
 
+	// Allow changes only to DNSNameservers in ManagedSubnets spec
+	if newObj.Spec.ManagedSubnets != nil && oldObj.Spec.ManagedSubnets != nil {
+		// Check if any fields other than DNSNameservers have changed
+		if len(oldObj.Spec.ManagedSubnets) != len(newObj.Spec.ManagedSubnets) {
+			allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "managedSubnets"), "cannot add or remove subnets"))
+		} else {
+			// Build maps of subnets by CIDR
+			oldSubnetMap := make(map[string]*infrav1.SubnetSpec)
+
+			for i := range oldObj.Spec.ManagedSubnets {
+				oldSubnet := &oldObj.Spec.ManagedSubnets[i]
+				oldSubnetMap[oldSubnet.CIDR] = oldSubnet
+			}
+
+			// Check if all new subnets have matching old subnets with the same CIDR
+			for i := range newObj.Spec.ManagedSubnets {
+				newSubnet := &newObj.Spec.ManagedSubnets[i]
+
+				oldSubnet, exists := oldSubnetMap[newSubnet.CIDR]
+				if !exists {
+					allErrs = append(allErrs, field.Forbidden(
+						field.NewPath("spec", "managedSubnets"),
+						fmt.Sprintf("cannot change subnet CIDR from existing value to %s", newSubnet.CIDR),
+					))
+					continue
+				}
+
+				// DNSNameservers is mutable; clear it on both sides so the
+				// later whole-spec comparison ignores it
+				oldSubnet.DNSNameservers = nil
+				newSubnet.DNSNameservers = nil
+			}
+		}
+	}
+
 	// Allow changes on AllowedCIDRs
 	if newObj.Spec.APIServerLoadBalancer != nil && oldObj.Spec.APIServerLoadBalancer != nil {
 		oldObj.Spec.APIServerLoadBalancer.AllowedCIDRs = []string{}
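
The ManagedSubnets block uses the same neutralize-then-compare pattern as the AllowedCIDRs handling next to it: mutable fields are blanked on both old and new objects so that the webhook's eventual whole-spec comparison only trips on disallowed changes. A self-contained sketch of that pattern with a reduced subnet type (the real webhook accumulates `field.Forbidden` errors and compares the full specs later, rather than returning early):

```go
package main

import (
	"fmt"
	"reflect"
)

// subnetSpec is a reduced stand-in for infrav1.SubnetSpec.
type subnetSpec struct {
	CIDR           string
	DNSNameservers []string
}

// onlyDNSChanged indexes old subnets by CIDR, rejects CIDR changes, then
// blanks the one mutable field on both sides so a final deep comparison
// flags any other difference.
func onlyDNSChanged(oldSubnets, newSubnets []subnetSpec) error {
	if len(oldSubnets) != len(newSubnets) {
		return fmt.Errorf("cannot add or remove subnets")
	}
	oldByCIDR := make(map[string]*subnetSpec, len(oldSubnets))
	for i := range oldSubnets {
		oldByCIDR[oldSubnets[i].CIDR] = &oldSubnets[i]
	}
	for i := range newSubnets {
		n := &newSubnets[i]
		o, ok := oldByCIDR[n.CIDR]
		if !ok {
			return fmt.Errorf("cannot change subnet CIDR to %s", n.CIDR)
		}
		o.DNSNameservers, n.DNSNameservers = nil, nil // mutable: ignore
	}
	if !reflect.DeepEqual(oldSubnets, newSubnets) {
		return fmt.Errorf("only dnsNameservers may change")
	}
	return nil
}

func main() {
	old := []subnetSpec{{CIDR: "10.0.0.0/24", DNSNameservers: []string{"8.8.8.8"}}}
	upd := []subnetSpec{{CIDR: "10.0.0.0/24", DNSNameservers: []string{"1.1.1.1"}}}
	fmt.Println(onlyDNSChanged(old, upd)) // <nil>: allowed
}
```

Blanking the field on both objects, rather than comparing it explicitly, keeps the allow-list style already used elsewhere in `ValidateUpdate`.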
diff --git a/hack/tools/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go b/hack/tools/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go
index a988b60e8d..bea5690c40 100644
--- a/hack/tools/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go
+++ b/hack/tools/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go
@@ -192,14 +192,14 @@ func copyValueToTarget(target *yaml.RNode, value *yaml.RNode, selector *types.Ta
 			Path:   kyaml_utils.SmarterPathSplitter(fp, "."),
 			Create: createKind})
 		if err != nil {
-			return errors.WrapPrefixf(err, fieldRetrievalError(fp, createKind != 0)) //nolint:govet
+			return errors.WrapPrefixf(err, fieldRetrievalError(fp, createKind != 0))
 		}
 		targetFields, err := targetFieldList.Elements()
 		if err != nil {
-			return errors.WrapPrefixf(err, fieldRetrievalError(fp, createKind != 0)) //nolint:govet
+			return errors.WrapPrefixf(err, fieldRetrievalError(fp, createKind != 0))
 		}
 		if len(targetFields) == 0 {
-			return errors.Errorf(fieldRetrievalError(fp, createKind != 0)) //nolint:govet
+			return errors.Errorf(fieldRetrievalError(fp, createKind != 0))
 		}
 
 		for _, t := range targetFields {
diff --git a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go
index d3a894123f..0f4008c97f 100644
--- a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go
+++ b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go
@@ -170,7 +170,7 @@ func (ra *ResAccumulator) FixBackReferences() (err error) {
 
 // Intersection drops the resources which "other" does not have.
 func (ra *ResAccumulator) Intersection(other resmap.ResMap) error {
-	otherIds := other.AllIds() //nolint:revive
+	otherIds := other.AllIds()
 	for _, curId := range ra.resMap.AllIds() {
 		toDelete := true
 		for _, otherId := range otherIds {
diff --git a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go
index 86017301ef..06d13b5261 100644
--- a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go
+++ b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go
@@ -178,7 +178,6 @@ func (p *HelmChartInflationGeneratorPlugin) runHelmCommand(
 	}
 	if err != nil {
 		helm := p.h.GeneralConfig().HelmConfig.Command
-		//nolint:govet
 		err = errors.WrapPrefixf(
 			fmt.Errorf(
 				"unable to run: '%s %s' with env=%s (is '%s' installed?): %w",
@@ -301,7 +300,7 @@ func (p *HelmChartInflationGeneratorPlugin) Generate() (rm resmap.ResMap, err er
 	}
 	// try to remove the contents before first "---" because
 	// helm may produce messages to stdout before it
-	r := &kio.ByteReader{Reader: bytes.NewBuffer(stdout), OmitReaderAnnotations: true}
+	r := &kio.ByteReader{Reader: bytes.NewBufferString(string(stdout)), OmitReaderAnnotations: true}
 	nodes, err := r.Read()
 	if err != nil {
 		return nil, fmt.Errorf("error reading helm output: %w", err)
diff --git a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/builtins/NamespaceTransformer.go b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/builtins/NamespaceTransformer.go
index d839fb9751..30a88340ff 100644
--- a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/builtins/NamespaceTransformer.go
+++ b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/builtins/NamespaceTransformer.go
@@ -14,8 +14,6 @@ import (
 )
 
 // Change or set the namespace of non-cluster level resources.
-//
-//nolint:tagalign
 type NamespaceTransformerPlugin struct {
 	types.ObjectMeta       `json:"metadata,omitempty" yaml:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
 	FieldSpecs             []types.FieldSpec                `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"`
diff --git a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/loader/fileloader.go b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/loader/fileloader.go
index 69b8295eb7..6ecc9fcefa 100644
--- a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/loader/fileloader.go
+++ b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/loader/fileloader.go
@@ -169,7 +169,7 @@ func (fl *FileLoader) New(path string) (ifc.Loader, error) {
 	}
 	root, err := filesys.ConfirmDir(fl.fSys, fl.root.Join(path))
 	if err != nil {
-		return nil, errors.WrapPrefixf(err, ErrRtNotDir.Error()) //nolint:govet
+		return nil, errors.WrapPrefixf(err, ErrRtNotDir.Error())
 	}
 	if err = fl.errIfGitContainmentViolation(root); err != nil {
 		return nil, err
diff --git a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/loader/loader.go b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/loader/loader.go
index 60b254fa7e..e10885b9b7 100644
--- a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/loader/loader.go
+++ b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/loader/loader.go
@@ -28,7 +28,7 @@ func NewLoader(
 	}
 	root, err := filesys.ConfirmDir(fSys, target)
 	if err != nil {
-		return nil, errors.WrapPrefixf(err, ErrRtNotDir.Error()) //nolint:govet
+		return nil, errors.WrapPrefixf(err, ErrRtNotDir.Error())
 	}
 	return newLoaderAtConfirmedDir(
 		lr, root, fSys, nil, git.ClonerUsingGitExec), nil
diff --git a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go
index b5d4b7aec1..c539c290d5 100644
--- a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go
+++ b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go
@@ -15,8 +15,6 @@ import (
 )
 
 // TransformerConfig holds the data needed to perform transformations.
-//
-//nolint:tagalign
 type TransformerConfig struct {
 	// if any fields are added, update the DeepCopy implementation
 	NamePrefix        types.FsSlice `json:"namePrefix,omitempty" yaml:"namePrefix,omitempty"`
diff --git a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go
index 108c3b2908..71c52884f0 100644
--- a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go
+++ b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go
@@ -178,7 +178,6 @@ func (p *ExecPlugin) invokePlugin(input []byte) ([]byte, error) {
 	}
 	result, err := cmd.Output()
 	if err != nil {
-		//nolint:govet
 		return nil, errors.WrapPrefixf(
 			fmt.Errorf("failure in plugin configured via %s; %w",
 				f.Name(), err), stdErr.String())
diff --git a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go
index 2edf8791ff..e494df767e 100644
--- a/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go
+++ b/hack/tools/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go
@@ -34,8 +34,7 @@ type Loader struct {
 }
 
 func NewLoader(
-	pc *types.PluginConfig, rf *resmap.Factory, fs filesys.FileSystem,
-) *Loader {
+	pc *types.PluginConfig, rf *resmap.Factory, fs filesys.FileSystem) *Loader {
 	return &Loader{pc: pc, rf: rf, fs: fs}
 }
 
@@ -59,8 +58,7 @@ func (l *Loader) Config() *types.PluginConfig {
 
 func (l *Loader) LoadGenerators(
 	ldr ifc.Loader, v ifc.Validator, rm resmap.ResMap) (
-	result []*resmap.GeneratorWithProperties, err error,
-) {
+	result []*resmap.GeneratorWithProperties, err error) {
 	for _, res := range rm.Resources() {
 		g, err := l.LoadGenerator(ldr, v, res)
 		if err != nil {
@@ -76,8 +74,7 @@ func (l *Loader) LoadGenerators(
 }
 
 func (l *Loader) LoadGenerator(
-	ldr ifc.Loader, v ifc.Validator, res *resource.Resource,
-) (resmap.Generator, error) {
+	ldr ifc.Loader, v ifc.Validator, res *resource.Resource) (resmap.Generator, error) {
 	c, err := l.loadAndConfigurePlugin(ldr, v, res)
 	if err != nil {
 		return nil, err
@@ -90,8 +87,7 @@ func (l *Loader) LoadGenerator(
 }
 
 func (l *Loader) LoadTransformers(
-	ldr ifc.Loader, v ifc.Validator, rm resmap.ResMap,
-) ([]*resmap.TransformerWithProperties, error) {
+	ldr ifc.Loader, v ifc.Validator, rm resmap.ResMap) ([]*resmap.TransformerWithProperties, error) {
 	var result []*resmap.TransformerWithProperties
 	for _, res := range rm.Resources() {
 		t, err := l.LoadTransformer(ldr, v, res)
@@ -108,8 +104,7 @@ func (l *Loader) LoadTransformers(
 }
 
 func (l *Loader) LoadTransformer(
-	ldr ifc.Loader, v ifc.Validator, res *resource.Resource,
-) (*resmap.TransformerWithProperties, error) {
+	ldr ifc.Loader, v ifc.Validator, res *resource.Resource) (*resmap.TransformerWithProperties, error) {
 	c, err := l.loadAndConfigurePlugin(ldr, v, res)
 	if err != nil {
 		return nil, err
@@ -184,8 +179,7 @@ func isBuiltinPlugin(res *resource.Resource) bool {
 func (l *Loader) loadAndConfigurePlugin(
 	ldr ifc.Loader,
 	v ifc.Validator,
-	res *resource.Resource,
-) (c resmap.Configurable, err error) {
+	res *resource.Resource) (c resmap.Configurable, err error) {
 	if isBuiltinPlugin(res) {
 		switch l.pc.BpLoadingOptions {
 		case types.BploLoadFromFileSys:
@@ -198,7 +192,7 @@ func (l *Loader) loadAndConfigurePlugin(
 			c, err = l.makeBuiltinPlugin(res.GetGvk())
 		default:
 			err = fmt.Errorf(
-				"unknown plugin loader behavior specified: %s %v", res.GetGvk().String(),
+				"unknown plugin loader behavior specified: %v",
 				l.pc.BpLoadingOptions)
 		}
 	} else {
@@ -288,3 +282,4 @@ func (l *Loader) loadExecOrGoPlugin(resId resid.ResId) (resmap.Configurable, err
 	}
 	return c, nil
 }
+
diff --git a/hack/tools/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addmetadata.go b/hack/tools/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addmetadata.go
index 25b1a5aaca..93cf91a006 100644
--- a/hack/tools/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addmetadata.go
+++ b/hack/tools/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addmetadata.go
@@ -139,7 +139,12 @@ func (o *addMetadataOptions) addAnnotations(m *types.Kustomization) error {
 
 func (o *addMetadataOptions) addLabels(m *types.Kustomization) error {
 	if o.labelsWithoutSelector {
-		return o.writeToLabels(m, label)
+		m.Labels = append(m.Labels, types.Label{
+			Pairs:            make(map[string]string),
+			IncludeSelectors: false,
+			IncludeTemplates: o.includeTemplates,
+		})
+		return o.writeToMap(m.Labels[len(m.Labels)-1].Pairs, label)
 	}
 	if m.CommonLabels == nil {
 		m.CommonLabels = make(map[string]string)
@@ -149,67 +154,10 @@ func (o *addMetadataOptions) addLabels(m *types.Kustomization) error {
 
 func (o *addMetadataOptions) writeToMap(m map[string]string, kind kindOfAdd) error {
 	for k, v := range o.metadata {
-		if err := o.writeToMapEntry(m, k, v, kind); err != nil {
-			return err
+		if _, ok := m[k]; ok && !o.force {
+			return fmt.Errorf("%s %s already in kustomization file", kind, k)
 		}
+		m[k] = v
 	}
 	return nil
 }
-
-func (o *addMetadataOptions) writeToMapEntry(m map[string]string, k, v string, kind kindOfAdd) error {
-	if _, ok := m[k]; ok && !o.force {
-		return fmt.Errorf("%s %s already in kustomization file. Use --force to override.", kind, k)
-	}
-	m[k] = v
-	return nil
-}
-
-func (o *addMetadataOptions) writeToLabels(m *types.Kustomization, kind kindOfAdd) error {
-	lbl := types.Label{
-		Pairs:            make(map[string]string),
-		IncludeSelectors: false,
-		IncludeTemplates: o.includeTemplates,
-	}
-	for k, v := range o.metadata {
-		if i, ok := o.findLabelKeyIndex(m, lbl, k); ok {
-			if err := o.writeToMapEntry(m.Labels[i].Pairs, k, v, kind); err != nil {
-				return err
-			}
-			continue
-		}
-		if i, ok := o.findLabelIndex(m, lbl); ok {
-			if err := o.writeToMapEntry(m.Labels[i].Pairs, k, v, kind); err != nil {
-				return err
-			}
-			continue
-		}
-		if err := o.writeToMap(lbl.Pairs, kind); err != nil {
-			return err
-		}
-		m.Labels = append(m.Labels, lbl)
-	}
-	return nil
-}
-
-func (o *addMetadataOptions) matchLabelSettings(lbl1, lbl2 types.Label) bool {
-	return lbl1.IncludeSelectors == lbl2.IncludeSelectors &&
-		lbl1.IncludeTemplates == lbl2.IncludeTemplates
-}
-
-func (o *addMetadataOptions) findLabelIndex(m *types.Kustomization, lbl types.Label) (int, bool) {
-	for i, ml := range m.Labels {
-		if o.matchLabelSettings(ml, lbl) {
-			return i, true
-		}
-	}
-	return 0, false
-}
-
-func (o *addMetadataOptions) findLabelKeyIndex(m *types.Kustomization, lbl types.Label, key string) (int, bool) {
-	if i, found := o.findLabelIndex(m, lbl); found {
-		if _, ok := m.Labels[i].Pairs[key]; ok {
-			return i, true
-		}
-	}
-	return 0, false
-}
diff --git a/hack/tools/vendor/sigs.k8s.io/kustomize/kyaml/filesys/filesystem.go b/hack/tools/vendor/sigs.k8s.io/kustomize/kyaml/filesys/filesystem.go
index c29d5ad8a7..79dfc53bf5 100644
--- a/hack/tools/vendor/sigs.k8s.io/kustomize/kyaml/filesys/filesystem.go
+++ b/hack/tools/vendor/sigs.k8s.io/kustomize/kyaml/filesys/filesystem.go
@@ -78,7 +78,6 @@ func ConfirmDir(fSys FileSystem, path string) (ConfirmedDir, error) {
 		return "", errors.WrapPrefixf(err, "not a valid directory")
 	}
 	if f != "" {
-		//nolint:govet
 		return "", errors.WrapPrefixf(errors.Errorf("file is not directory"), fmt.Sprintf("'%s'", path))
 	}
 	return d, nil
diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/.travis.yml b/hack/tools/vendor/sigs.k8s.io/yaml/.travis.yml
deleted file mode 100644
index 54ed8f9cb9..0000000000
--- a/hack/tools/vendor/sigs.k8s.io/yaml/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-arch: arm64
-dist: focal
-go: 1.15.x
-script:
-  - diff -u <(echo -n) <(gofmt -d *.go)
-  - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON)
-  - GO111MODULE=on go vet .
-  - GO111MODULE=on go test -v -race ./...
-  - git diff --exit-code
-install:
-  - GO111MODULE=off go get golang.org/x/lint/golint
diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS
deleted file mode 100644
index 73be0a3a9b..0000000000
--- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS
+++ /dev/null
@@ -1,24 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- dims
-- jpbetz
-- smarterclayton
-- deads2k
-- sttts
-- liggitt
-- natasha41575
-- knverey
-reviewers:
-- dims
-- thockin
-- jpbetz
-- smarterclayton
-- deads2k
-- derekwaynecarr
-- mikedanese
-- liggitt
-- sttts
-- tallclair
-labels:
-- sig/api-machinery
diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
index 53f4139dc3..9a8f1e6782 100644
--- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
+++ b/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
@@ -1,143 +1,71 @@
-# go-yaml fork
+# goyaml.v2
 
-This package is a fork of the go-yaml library and is intended solely for consumption
-by kubernetes projects. In this fork, we plan to support only critical changes required for
-kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests
-should be made in the upstream go-yaml library, and we will reject such changes in this fork
-unless we are pulling them from upstream.
+This package provides type and function aliases for the `go.yaml.in/yaml/v2` package (which is compatible with `gopkg.in/yaml.v2`).
 
-This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0
+## Purpose
 
-# YAML support for the Go language
-
-Introduction
-------------
-
-The yaml package enables Go programs to comfortably encode and decode YAML
-values. It was developed within [Canonical](https://www.canonical.com) as
-part of the [juju](https://juju.ubuntu.com) project, and is based on a
-pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
-C library to parse and generate YAML data quickly and reliably.
-
-Compatibility
--------------
-
-The yaml package supports most of YAML 1.1 and 1.2, including support for
-anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
-implemented, and base-60 floats from YAML 1.1 are purposefully not
-supported since they're a poor design and are gone in YAML 1.2.
-
-Installation and usage
-----------------------
-
-The import path for the package is *gopkg.in/yaml.v2*.
-
-To install it, run:
-
-    go get gopkg.in/yaml.v2
-
-API documentation
------------------
-
-If opened in a browser, the import path itself leads to the API documentation:
-
-  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
-
-API stability
--------------
-
-The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
-
-
-License
--------
-
-The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
-
-
-Example
--------
-
-```Go
-package main
-
-import (
-        "fmt"
-        "log"
-
-        "gopkg.in/yaml.v2"
-)
-
-var data = `
-a: Easy!
-b:
-  c: 2
-  d: [3, 4]
-`
-
-// Note: struct fields must be public in order for unmarshal to
-// correctly populate the data.
-type T struct {
-        A string
-        B struct {
-                RenamedC int   `yaml:"c"`
-                D        []int `yaml:",flow"`
-        }
-}
-
-func main() {
-        t := T{}
-    
-        err := yaml.Unmarshal([]byte(data), &t)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- t:\n%v\n\n", t)
-    
-        d, err := yaml.Marshal(&t)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- t dump:\n%s\n\n", string(d))
-    
-        m := make(map[interface{}]interface{})
-    
-        err = yaml.Unmarshal([]byte(data), &m)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- m:\n%v\n\n", m)
-    
-        d, err = yaml.Marshal(&m)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- m dump:\n%s\n\n", string(d))
-}
-```
+The purpose of this package is to:
 
-This example will generate the following output:
+1. Provide a transition path for users migrating from the sigs.k8s.io/yaml package to direct usage of go.yaml.in/yaml/v2
+2. Maintain compatibility with existing code while encouraging migration to the upstream package
+3. Reduce maintenance overhead by delegating to the upstream implementation
 
+## Usage
+
+Instead of importing this package directly, you should migrate to using `go.yaml.in/yaml/v2` directly:
+
+```go
+// Old way
+import "sigs.k8s.io/yaml/goyaml.v2"
+
+// Recommended way
+import "go.yaml.in/yaml/v2"
 ```
---- t:
-{Easy! {2 [3 4]}}
-
---- t dump:
-a: Easy!
-b:
-  c: 2
-  d: [3, 4]
-
-
---- m:
-map[a:Easy! b:map[c:2 d:[3 4]]]
-
---- m dump:
-a: Easy!
-b:
-  c: 2
-  d:
-  - 3
-  - 4
-```
 
+## Available Types and Functions
+
+All public types and functions from `go.yaml.in/yaml/v2` are available through this package:
+
+### Types
+
+- `MapSlice` - Encodes and decodes as a YAML map with preserved key order
+- `MapItem` - An item in a MapSlice
+- `Unmarshaler` - Interface for custom unmarshaling behavior
+- `Marshaler` - Interface for custom marshaling behavior
+- `IsZeroer` - Interface to check if an object is zero
+- `Decoder` - Reads and decodes YAML values from an input stream
+- `Encoder` - Writes YAML values to an output stream
+- `TypeError` - Error returned by Unmarshal for decoding issues
+
+### Functions
+
+- `Unmarshal` - Decodes YAML data into a Go value
+- `UnmarshalStrict` - Like Unmarshal but errors on unknown fields
+- `Marshal` - Serializes a Go value into YAML
+- `NewDecoder` - Creates a new Decoder
+- `NewEncoder` - Creates a new Encoder
+- `FutureLineWrap` - Controls line wrapping behavior
+
+## Migration Guide
+
+To migrate from this package to `go.yaml.in/yaml/v2`:
+
+1. Update your import statements:
+   ```go
+   // From
+   import "sigs.k8s.io/yaml/goyaml.v2"
+   
+   // To
+   import "go.yaml.in/yaml/v2"
+   ```
+
+2. No code changes should be necessary as the API is identical
+
+3. Update your go.mod file to include the dependency:
+   ```
+   require go.yaml.in/yaml/v2 v2.4.2
+   ```
+
+## Deprecation Notice
+
+All types and functions in this package are marked as deprecated. You should migrate to using `go.yaml.in/yaml/v2` directly.
diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go b/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go
new file mode 100644
index 0000000000..8c82bc2cb9
--- /dev/null
+++ b/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import (
+	gopkg_yaml "go.yaml.in/yaml/v2"
+)
+
+// Type aliases for public types from go.yaml.in/yaml/v2
+type (
+	// MapSlice encodes and decodes as a YAML map.
+	// The order of keys is preserved when encoding and decoding.
+	// Deprecated: Use go.yaml.in/yaml/v2.MapSlice directly.
+	MapSlice = gopkg_yaml.MapSlice
+
+	// MapItem is an item in a MapSlice.
+	// Deprecated: Use go.yaml.in/yaml/v2.MapItem directly.
+	MapItem = gopkg_yaml.MapItem
+
+	// Unmarshaler is implemented by types to customize their behavior when being unmarshaled from a YAML document.
+	// Deprecated: Use go.yaml.in/yaml/v2.Unmarshaler directly.
+	Unmarshaler = gopkg_yaml.Unmarshaler
+
+	// Marshaler is implemented by types to customize their behavior when being marshaled into a YAML document.
+	// Deprecated: Use go.yaml.in/yaml/v2.Marshaler directly.
+	Marshaler = gopkg_yaml.Marshaler
+
+	// IsZeroer is used to check whether an object is zero to determine whether it should be omitted when
+	// marshaling with the omitempty flag. One notable implementation is time.Time.
+	// Deprecated: Use go.yaml.in/yaml/v2.IsZeroer directly.
+	IsZeroer = gopkg_yaml.IsZeroer
+
+	// Decoder reads and decodes YAML values from an input stream.
+	// Deprecated: Use go.yaml.in/yaml/v2.Decoder directly.
+	Decoder = gopkg_yaml.Decoder
+
+	// Encoder writes YAML values to an output stream.
+	// Deprecated: Use go.yaml.in/yaml/v2.Encoder directly.
+	Encoder = gopkg_yaml.Encoder
+
+	// TypeError is returned by Unmarshal when one or more fields in the YAML document cannot be properly decoded.
+	// Deprecated: Use go.yaml.in/yaml/v2.TypeError directly.
+	TypeError = gopkg_yaml.TypeError
+)
+
+// Function aliases for public functions from go.yaml.in/yaml/v2
+var (
+	// Unmarshal decodes the first document found within the in byte slice and assigns decoded values into the out value.
+	// Deprecated: Use go.yaml.in/yaml/v2.Unmarshal directly.
+	Unmarshal = gopkg_yaml.Unmarshal
+
+	// UnmarshalStrict is like Unmarshal except that any fields that are found in the data that do not have corresponding struct members will result in an error.
+	// Deprecated: Use go.yaml.in/yaml/v2.UnmarshalStrict directly.
+	UnmarshalStrict = gopkg_yaml.UnmarshalStrict
+
+	// Marshal serializes the value provided into a YAML document.
+	// Deprecated: Use go.yaml.in/yaml/v2.Marshal directly.
+	Marshal = gopkg_yaml.Marshal
+
+	// NewDecoder returns a new decoder that reads from r.
+	// Deprecated: Use go.yaml.in/yaml/v2.NewDecoder directly.
+	NewDecoder = gopkg_yaml.NewDecoder
+
+	// NewEncoder returns a new encoder that writes to w.
+	// Deprecated: Use go.yaml.in/yaml/v2.NewEncoder directly.
+	NewEncoder = gopkg_yaml.NewEncoder
+
+	// FutureLineWrap globally disables line wrapping when encoding long strings.
+	// Deprecated: Use go.yaml.in/yaml/v2.FutureLineWrap directly.
+	FutureLineWrap = gopkg_yaml.FutureLineWrap
+)
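
Since every symbol in the new `yaml_aliases.go` is a type or function alias, the deprecated import path and the upstream one are interchangeable at the call site, which is what makes the README's "no code changes" migration claim work. A small sketch, assuming both modules are available on the module path:

```go
package main

import (
	"fmt"

	newyaml "go.yaml.in/yaml/v2"
	oldyaml "sigs.k8s.io/yaml/goyaml.v2"
)

// Both packages expose the same symbols, so decoding through the deprecated
// alias path and through the upstream path yields identical results; only
// the import line differs.
func main() {
	data := []byte("replicas: 3")

	var a, b struct {
		Replicas int `yaml:"replicas"`
	}
	if err := oldyaml.Unmarshal(data, &a); err != nil {
		panic(err)
	}
	if err := newyaml.Unmarshal(data, &b); err != nil {
		panic(err)
	}
	fmt.Println(a.Replicas == b.Replicas) // true
}
```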
diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS b/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS
deleted file mode 100644
index 73be0a3a9b..0000000000
--- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS
+++ /dev/null
@@ -1,24 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- dims
-- jpbetz
-- smarterclayton
-- deads2k
-- sttts
-- liggitt
-- natasha41575
-- knverey
-reviewers:
-- dims
-- thockin
-- jpbetz
-- smarterclayton
-- deads2k
-- derekwaynecarr
-- mikedanese
-- liggitt
-- sttts
-- tallclair
-labels:
-- sig/api-machinery
diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/README.md b/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/README.md
index b1a6b2e9e2..318a00923b 100644
--- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/README.md
+++ b/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/README.md
@@ -1,160 +1,70 @@
-# go-yaml fork
+# goyaml.v3
 
-This package is a fork of the go-yaml library and is intended solely for consumption
-by kubernetes projects. In this fork, we plan to support only critical changes required for
-kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests
-should be made in the upstream go-yaml library, and we will reject such changes in this fork
-unless we are pulling them from upstream.
+This package provides type and function aliases for the `go.yaml.in/yaml/v3` package (which is compatible with `gopkg.in/yaml.v3`).
 
-This fork is based on v3.0.1: https://github.com/go-yaml/yaml/releases/tag/v3.0.1.
+## Purpose
 
-# YAML support for the Go language
+The purpose of this package is to:
 
-Introduction
-------------
+1. Provide a transition path for users migrating from the sigs.k8s.io/yaml package to direct usage of go.yaml.in/yaml/v3
+2. Maintain compatibility with existing code while encouraging migration to the upstream package
+3. Reduce maintenance overhead by delegating to the upstream implementation
 
-The yaml package enables Go programs to comfortably encode and decode YAML
-values. It was developed within [Canonical](https://www.canonical.com) as
-part of the [juju](https://juju.ubuntu.com) project, and is based on a
-pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
-C library to parse and generate YAML data quickly and reliably.
+## Usage
 
-Compatibility
--------------
-
-The yaml package supports most of YAML 1.2, but preserves some behavior
-from 1.1 for backwards compatibility.
-
-Specifically, as of v3 of the yaml package:
-
- - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
-   decoded into a typed bool value. Otherwise they behave as a string. Booleans
-   in YAML 1.2 are _true/false_ only.
- - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
-   as specified in YAML 1.2, because most parsers still use the old format.
-   Octals in the  _0o777_ format are supported though, so new files work.
- - Does not support base-60 floats. These are gone from YAML 1.2, and were
-   actually never supported by this package as it's clearly a poor choice.
-
-and offers backwards
-compatibility with YAML 1.1 in some cases.
-1.2, including support for
-anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
-implemented, and base-60 floats from YAML 1.1 are purposefully not
-supported since they're a poor design and are gone in YAML 1.2.
-
-Installation and usage
-----------------------
-
-The import path for the package is *gopkg.in/yaml.v3*.
-
-To install it, run:
-
-    go get gopkg.in/yaml.v3
-
-API documentation
------------------
-
-If opened in a browser, the import path itself leads to the API documentation:
-
-  - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
-
-API stability
--------------
-
-The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
-
-
-License
--------
-
-The yaml package is licensed under the MIT and Apache License 2.0 licenses.
-Please see the LICENSE file for details.
-
-
-Example
--------
-
-```Go
-package main
-
-import (
-        "fmt"
-        "log"
-
-        "gopkg.in/yaml.v3"
-)
-
-var data = `
-a: Easy!
-b:
-  c: 2
-  d: [3, 4]
-`
-
-// Note: struct fields must be public in order for unmarshal to
-// correctly populate the data.
-type T struct {
-        A string
-        B struct {
-                RenamedC int   `yaml:"c"`
-                D        []int `yaml:",flow"`
-        }
-}
-
-func main() {
-        t := T{}
-    
-        err := yaml.Unmarshal([]byte(data), &t)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- t:\n%v\n\n", t)
-    
-        d, err := yaml.Marshal(&t)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- t dump:\n%s\n\n", string(d))
-    
-        m := make(map[interface{}]interface{})
-    
-        err = yaml.Unmarshal([]byte(data), &m)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- m:\n%v\n\n", m)
-    
-        d, err = yaml.Marshal(&m)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- m dump:\n%s\n\n", string(d))
-}
-```
+Instead of importing this package directly, you should migrate to using `go.yaml.in/yaml/v3` directly:
 
-This example will generate the following output:
+```go
+// Old way
+import "sigs.k8s.io/yaml/goyaml.v3"
 
+// Recommended way
+import "go.yaml.in/yaml/v3"
 ```
---- t:
-{Easy! {2 [3 4]}}
-
---- t dump:
-a: Easy!
-b:
-  c: 2
-  d: [3, 4]
-
-
---- m:
-map[a:Easy! b:map[c:2 d:[3 4]]]
-
---- m dump:
-a: Easy!
-b:
-  c: 2
-  d:
-  - 3
-  - 4
-```
 
+## Available Types and Functions
+
+All public types and functions from `go.yaml.in/yaml/v3` are available through this package:
+
+### Types
+
+- `Unmarshaler` - Interface for custom unmarshaling behavior
+- `Marshaler` - Interface for custom marshaling behavior
+- `IsZeroer` - Interface to check if an object is zero
+- `Decoder` - Reads and decodes YAML values from an input stream
+- `Encoder` - Writes YAML values to an output stream
+- `TypeError` - Error returned by Unmarshal for decoding issues
+- `Node` - Represents a YAML node in the document
+- `Kind` - Represents the kind of a YAML node
+- `Style` - Represents the style of a YAML node
+
+### Functions
+
+- `Unmarshal` - Decodes YAML data into a Go value
+- `Marshal` - Serializes a Go value into YAML
+- `NewDecoder` - Creates a new Decoder
+- `NewEncoder` - Creates a new Encoder
+
+## Migration Guide
+
+To migrate from this package to `go.yaml.in/yaml/v3`:
+
+1. Update your import statements:
+   ```go
+   // From
+   import "sigs.k8s.io/yaml/goyaml.v3"
+   
+   // To
+   import "go.yaml.in/yaml/v3"
+   ```
+
+2. No code changes should be necessary, as the API is identical.
+
+3. Update your go.mod file to include the dependency:
+   ```
+   require go.yaml.in/yaml/v3 v3.0.3
+   ```
+
+## Deprecation Notice
+
+All types and functions in this package are marked as deprecated. You should migrate to using `go.yaml.in/yaml/v3` directly.
diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/patch.go b/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/patch.go
deleted file mode 100644
index b98c3321ed..0000000000
--- a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/patch.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Copyright 2023 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package yaml
-
-// yaml_emitter_increase_indent preserves the original signature and delegates to
-// yaml_emitter_increase_indent_compact without compact-sequence indentation
-func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
-	return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false)
-}
-
-// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
-func (e *Encoder) CompactSeqIndent() {
-	e.encoder.emitter.compact_sequence_indent = true
-}
-
-// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
-func (e *Encoder) DefaultSeqIndent() {
-	e.encoder.emitter.compact_sequence_indent = false
-}
-
-// yaml_emitter_process_line_comment preserves the original signature and delegates to
-// yaml_emitter_process_line_comment_linebreak passing false for linebreak
-func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
-	return yaml_emitter_process_line_comment_linebreak(emitter, false)
-}
diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml_aliases.go b/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml_aliases.go
new file mode 100644
index 0000000000..8826ffefeb
--- /dev/null
+++ b/hack/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml_aliases.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import (
+	gopkg_yaml "go.yaml.in/yaml/v3"
+)
+
+// Type aliases for public types from go.yaml.in/yaml/v3
+type (
+	// Unmarshaler is implemented by types to customize their behavior when being unmarshaled from a YAML document.
+	// Deprecated: Use go.yaml.in/yaml/v3.Unmarshaler directly.
+	Unmarshaler = gopkg_yaml.Unmarshaler
+
+	// Marshaler is implemented by types to customize their behavior when being marshaled into a YAML document.
+	// Deprecated: Use go.yaml.in/yaml/v3.Marshaler directly.
+	Marshaler = gopkg_yaml.Marshaler
+
+	// IsZeroer is used to check whether an object is zero to determine whether it should be omitted when
+	// marshaling with the omitempty flag. One notable implementation is time.Time.
+	// Deprecated: Use go.yaml.in/yaml/v3.IsZeroer directly.
+	IsZeroer = gopkg_yaml.IsZeroer
+
+	// Decoder reads and decodes YAML values from an input stream.
+	// Deprecated: Use go.yaml.in/yaml/v3.Decoder directly.
+	Decoder = gopkg_yaml.Decoder
+
+	// Encoder writes YAML values to an output stream.
+	// Deprecated: Use go.yaml.in/yaml/v3.Encoder directly.
+	Encoder = gopkg_yaml.Encoder
+
+	// TypeError is returned by Unmarshal when one or more fields in the YAML document cannot be properly decoded.
+	// Deprecated: Use go.yaml.in/yaml/v3.TypeError directly.
+	TypeError = gopkg_yaml.TypeError
+
+	// Node represents a YAML node in the document.
+	// Deprecated: Use go.yaml.in/yaml/v3.Node directly.
+	Node = gopkg_yaml.Node
+
+	// Kind represents the kind of a YAML node.
+	// Deprecated: Use go.yaml.in/yaml/v3.Kind directly.
+	Kind = gopkg_yaml.Kind
+
+	// Style represents the style of a YAML node.
+	// Deprecated: Use go.yaml.in/yaml/v3.Style directly.
+	Style = gopkg_yaml.Style
+)
+
+// Constants for Kind type from go.yaml.in/yaml/v3
+const (
+	// DocumentNode represents a YAML document node.
+	// Deprecated: Use go.yaml.in/yaml/v3.DocumentNode directly.
+	DocumentNode = gopkg_yaml.DocumentNode
+
+	// SequenceNode represents a YAML sequence node.
+	// Deprecated: Use go.yaml.in/yaml/v3.SequenceNode directly.
+	SequenceNode = gopkg_yaml.SequenceNode
+
+	// MappingNode represents a YAML mapping node.
+	// Deprecated: Use go.yaml.in/yaml/v3.MappingNode directly.
+	MappingNode = gopkg_yaml.MappingNode
+
+	// ScalarNode represents a YAML scalar node.
+	// Deprecated: Use go.yaml.in/yaml/v3.ScalarNode directly.
+	ScalarNode = gopkg_yaml.ScalarNode
+
+	// AliasNode represents a YAML alias node.
+	// Deprecated: Use go.yaml.in/yaml/v3.AliasNode directly.
+	AliasNode = gopkg_yaml.AliasNode
+)
+
+// Constants for Style type from go.yaml.in/yaml/v3
+const (
+	// TaggedStyle represents a tagged YAML style.
+	// Deprecated: Use go.yaml.in/yaml/v3.TaggedStyle directly.
+	TaggedStyle = gopkg_yaml.TaggedStyle
+
+	// DoubleQuotedStyle represents a double-quoted YAML style.
+	// Deprecated: Use go.yaml.in/yaml/v3.DoubleQuotedStyle directly.
+	DoubleQuotedStyle = gopkg_yaml.DoubleQuotedStyle
+
+	// SingleQuotedStyle represents a single-quoted YAML style.
+	// Deprecated: Use go.yaml.in/yaml/v3.SingleQuotedStyle directly.
+	SingleQuotedStyle = gopkg_yaml.SingleQuotedStyle
+
+	// LiteralStyle represents a literal YAML style.
+	// Deprecated: Use go.yaml.in/yaml/v3.LiteralStyle directly.
+	LiteralStyle = gopkg_yaml.LiteralStyle
+
+	// FoldedStyle represents a folded YAML style.
+	// Deprecated: Use go.yaml.in/yaml/v3.FoldedStyle directly.
+	FoldedStyle = gopkg_yaml.FoldedStyle
+
+	// FlowStyle represents a flow YAML style.
+	// Deprecated: Use go.yaml.in/yaml/v3.FlowStyle directly.
+	FlowStyle = gopkg_yaml.FlowStyle
+)
+
+// Function aliases for public functions from go.yaml.in/yaml/v3
+var (
+	// Unmarshal decodes the first document found within the in byte slice and assigns decoded values into the out value.
+	// Deprecated: Use go.yaml.in/yaml/v3.Unmarshal directly.
+	Unmarshal = gopkg_yaml.Unmarshal
+
+	// Marshal serializes the value provided into a YAML document.
+	// Deprecated: Use go.yaml.in/yaml/v3.Marshal directly.
+	Marshal = gopkg_yaml.Marshal
+
+	// NewDecoder returns a new decoder that reads from r.
+	// Deprecated: Use go.yaml.in/yaml/v3.NewDecoder directly.
+	NewDecoder = gopkg_yaml.NewDecoder
+
+	// NewEncoder returns a new encoder that writes to w.
+	// Deprecated: Use go.yaml.in/yaml/v3.NewEncoder directly.
+	NewEncoder = gopkg_yaml.NewEncoder
+)
diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/yaml.go b/hack/tools/vendor/sigs.k8s.io/yaml/yaml.go
index fc10246bdb..aa01acd45d 100644
--- a/hack/tools/vendor/sigs.k8s.io/yaml/yaml.go
+++ b/hack/tools/vendor/sigs.k8s.io/yaml/yaml.go
@@ -24,7 +24,7 @@ import (
 	"reflect"
 	"strconv"
 
-	"sigs.k8s.io/yaml/goyaml.v2"
+	"go.yaml.in/yaml/v2"
 )
 
 // Marshal marshals obj into JSON using stdlib json.Marshal, and then converts JSON to YAML using JSONToYAML (see that method for more reference)
@@ -92,7 +92,7 @@ func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error {
 		d = opt(d)
 	}
 	if err := d.Decode(&obj); err != nil {
-		return fmt.Errorf("while decoding JSON: %v", err)
+		return fmt.Errorf("while decoding JSON: %w", err)
 	}
 	return nil
 }
@@ -417,3 +417,10 @@ func jsonToYAMLValue(j interface{}) interface{} {
 	}
 	return j
 }
+
+// DisallowUnknownFields configures the JSON decoder to error out if unknown
+// fields come along, instead of dropping them by default.
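+//
+// It satisfies JSONOpt, so it can be passed to Unmarshal; a sketch, where
+// data and obj are assumed to exist:
+//
+//	err := Unmarshal(data, &obj, DisallowUnknownFields)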
+func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
+	d.DisallowUnknownFields()
+	return d
+}
diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh
deleted file mode 100755
index ae760e6fea..0000000000
--- a/hack/update-codegen.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2024 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# 	http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euo pipefail
-set -x
-
-SCRIPT_ROOT=$(realpath $(dirname "${BASH_SOURCE[0]}"))
-PROJECT_ROOT=$(realpath "${SCRIPT_ROOT}/..")
-
-GENERATED_PKG="pkg/generated"
-
-# Ensure tools built by kube_codegen go in our own GOBIN rather than something
-# shared under GOPATH. This guards against these tools being rebuilt by some
-# other concurrent invocation, potentially with a different version.
-export GOBIN="${PROJECT_ROOT}/bin"
-
-cd "$PROJECT_ROOT"
-
-# For this to work, the current working directory must be under a Go module which
-# lists k8s.io/code-generator
-CODEGEN_PKG=$(go list -f '{{ .Dir }}' k8s.io/code-generator)
-
-source "${CODEGEN_PKG}/kube_codegen.sh"
-
-# Deep-copies and what-not are generated by controller-gen, so we don't need to use kube::codegen::gen_helpers
-
-declare -a gen_openapi_args=(
-    --report-filename "${PROJECT_ROOT}/api_violations.report"
-    --output-dir "${PROJECT_ROOT}/cmd/models-schema"
-    --output-pkg main
-    --boilerplate "${SCRIPT_ROOT}/boilerplate.go.txt"
-
-    # We need to include all referenced types in our generated openapi schema
-    # or applyconfiguration-gen won't be able to use it. Helpfully it will
-    # generate an error including the missing type.
-    --extra-pkgs sigs.k8s.io/cluster-api/api/v1beta1
-    --extra-pkgs k8s.io/api/core/v1
-)
-
-# It is an error to make a change which updates the api violations. When doing
-# this intentionally, for example when fixing violations, run with
-# UPDATE_API_KNOWN_VIOLATIONS=true to update the api violations report.
-if [ ! -z "${UPDATE_API_KNOWN_VIOLATIONS:-}" ]; then
-    gen_openapi_args+=(--update-report)
-fi
-
-kube::codegen::gen_openapi "${gen_openapi_args[@]}" "${PROJECT_ROOT}/api"
-
-openapi="${PROJECT_ROOT}/openapi.json"
-go run "${PROJECT_ROOT}/cmd/models-schema" | jq > "$openapi"
-
-kube::codegen::gen_client \
-    --with-applyconfig \
-    --with-watch \
-    --applyconfig-openapi-schema "$openapi" \
-    --output-dir "${PROJECT_ROOT}/${GENERATED_PKG}" \
-    --output-pkg sigs.k8s.io/cluster-api-provider-openstack/${GENERATED_PKG} \
-    --versioned-name clientset \
-    --boilerplate "${SCRIPT_ROOT}/boilerplate.go.txt" \
-    --one-input-api "api" \
-    "${PROJECT_ROOT}"
diff --git a/kustomize/v1beta1/default/cluster-template.yaml b/kustomize/v1beta1/default/cluster-template.yaml
index ae7db083cf..d27f558a31 100644
--- a/kustomize/v1beta1/default/cluster-template.yaml
+++ b/kustomize/v1beta1/default/cluster-template.yaml
@@ -73,9 +73,6 @@ spec:
           cloud-provider: external
           provider-id: "openstack:///'{{ instance_id }}'"
     clusterConfiguration:
-      apiServer:
-        extraArgs:
-          cloud-provider: external
       controllerManager:
         extraArgs:
           cloud-provider: external
diff --git a/netlify.toml b/netlify.toml
index b7ab61100a..a915db07ec 100644
--- a/netlify.toml
+++ b/netlify.toml
@@ -4,7 +4,7 @@ command = "make -C docs/book build"
 publish = "docs/book/book"
 
 [build.environment]
-GO_VERSION = "1.23.10"
+GO_VERSION = "1.24.11"
 
 # Standard Netlify redirects
 [[redirects]]
diff --git a/openshift/go.mod b/openshift/go.mod
index e2453f3fd8..0ba963834b 100644
--- a/openshift/go.mod
+++ b/openshift/go.mod
@@ -4,22 +4,23 @@ go 1.23.0
 
 require (
 	github.com/go-logr/logr v1.4.3
-	github.com/gophercloud/gophercloud/v2 v2.7.0
-	github.com/onsi/ginkgo/v2 v2.23.4
-	github.com/onsi/gomega v1.37.0
+	github.com/gophercloud/gophercloud/v2 v2.9.0
+	github.com/onsi/ginkgo/v2 v2.27.3
+	github.com/onsi/gomega v1.38.2
 	github.com/openshift/api v0.0.0-20231003083825-c3f7566f6ef6
 	github.com/openshift/cluster-capi-operator/e2e v0.0.0-20250123104340-ddf11cf94960
-	k8s.io/api v0.31.9
-	k8s.io/apimachinery v0.31.9
-	k8s.io/client-go v0.31.9
+	k8s.io/api v0.31.14
+	k8s.io/apimachinery v0.31.14
+	k8s.io/client-go v0.31.14
 	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
-	sigs.k8s.io/cluster-api v1.9.8
+	sigs.k8s.io/cluster-api v1.9.11
 	sigs.k8s.io/cluster-api-provider-openstack v0.8.0
 	sigs.k8s.io/controller-runtime v0.19.7
-	sigs.k8s.io/yaml v1.4.0
+	sigs.k8s.io/yaml v1.6.0
 )
 
 require (
+	github.com/Masterminds/semver/v3 v3.4.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -44,7 +45,7 @@ require (
 	github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gophercloud/utils/v2 v2.0.0-20241209100706-e3a3b7c07d26 // indirect
-	github.com/hashicorp/go-version v1.7.0 // indirect
+	github.com/hashicorp/go-version v1.8.0 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
@@ -54,30 +55,33 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_golang v1.22.0 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
-	github.com/spf13/pflag v1.0.6 // indirect
+	github.com/prometheus/client_golang v1.23.2 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.66.1 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
+	github.com/spf13/pflag v1.0.10 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	go.uber.org/automaxprocs v1.6.0 // indirect
-	go.uber.org/mock v0.5.2 // indirect
+	go.uber.org/mock v0.6.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
+	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
-	golang.org/x/net v0.40.0 // indirect
-	golang.org/x/oauth2 v0.24.0 // indirect
-	golang.org/x/sys v0.33.0 // indirect
-	golang.org/x/term v0.32.0 // indirect
-	golang.org/x/text v0.26.0 // indirect
+	golang.org/x/mod v0.27.0 // indirect
+	golang.org/x/net v0.43.0 // indirect
+	golang.org/x/oauth2 v0.30.0 // indirect
+	golang.org/x/sync v0.16.0 // indirect
+	golang.org/x/sys v0.35.0 // indirect
+	golang.org/x/term v0.34.0 // indirect
+	golang.org/x/text v0.28.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	golang.org/x/tools v0.33.0 // indirect
+	golang.org/x/tools v0.36.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-	google.golang.org/protobuf v1.36.5 // indirect
+	google.golang.org/protobuf v1.36.8 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apiextensions-apiserver v0.31.9 // indirect
+	k8s.io/apiextensions-apiserver v0.31.14 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
diff --git a/openshift/go.sum b/openshift/go.sum
index 22d3c2078b..429989eb35 100644
--- a/openshift/go.sum
+++ b/openshift/go.sum
@@ -1,3 +1,5 @@
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
@@ -18,6 +20,12 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs=
+github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
+github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
+github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
+github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
+github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
 github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
 github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
@@ -32,6 +40,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
 github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
+github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
+github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
 github.com/gofrs/uuid/v5 v5.3.0 h1:m0mUMr+oVYUdxpMLgSYCZiXe7PuVPnI94+OMeVBNedk=
 github.com/gofrs/uuid/v5 v5.3.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -52,16 +62,18 @@ github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J
 github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
-github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
+github.com/gophercloud/gophercloud/v2 v2.9.0 h1:Y9OMrwKF9EDERcHFSOTpf/6XGoAI0yOxmsLmQki4LPM=
+github.com/gophercloud/gophercloud/v2 v2.9.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
 github.com/gophercloud/utils/v2 v2.0.0-20241209100706-e3a3b7c07d26 h1:N65GYmx5LrMeYdeXcxMESDU+2pDyAOXlFNlHl7siUwM=
 github.com/gophercloud/utils/v2 v2.0.0-20241209100706-e3a3b7c07d26/go.mod h1:7SHUbtoiSYINNKgAVxse+PMhIio05IK7shHy8DVRaN0=
-github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
-github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
+github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
+github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -76,6 +88,10 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
+github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
+github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
+github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -85,10 +101,10 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
-github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
-github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
-github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
+github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8=
+github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
+github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
+github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
 github.com/openshift/api v0.0.0-20231003083825-c3f7566f6ef6 h1:dLPcRvLeZs3ATgXCVrU/ZEVssHO5636sodMdJVAPPUQ=
 github.com/openshift/api v0.0.0-20231003083825-c3f7566f6ef6/go.mod h1:qNtV0315F+f8ld52TLtPvrfivZpdimOzTi3kn9IVbtU=
 github.com/openshift/cluster-capi-operator/e2e v0.0.0-20250123104340-ddf11cf94960 h1:Rn22nhWAhm1XML+YWAXzsmE0Qn/CQGuhm2XgULVrIis=
@@ -98,20 +114,18 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
-github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
-github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
-github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
-github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
+github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -119,22 +133,32 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
+github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
+github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
+github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
-go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
-go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
+go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
+go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
 go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -142,44 +166,48 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
+golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
-golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
-golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
-golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
-golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
-golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
+golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
-golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
-golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
+golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
+golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
 gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
+google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -192,29 +220,31 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.31.9 h1:+gN4iZNccfr6y2EX28ZgcAq4yUKNZMhg2Jl72+2hoxQ=
-k8s.io/api v0.31.9/go.mod h1:+rao9hnuB9AHXVoqqwxPh493H91pte1ZhfJ6oz1qLJA=
-k8s.io/apiextensions-apiserver v0.31.9 h1:5U+Y7vvV+lVqOBjNmmTO42PxoQrp44yzXTHievxEhdY=
-k8s.io/apiextensions-apiserver v0.31.9/go.mod h1:tx/XA+SO6HhoXhXqvaeF5+iHlL7dF3wWACB6plC23M8=
-k8s.io/apimachinery v0.31.9 h1:sLGkHzsAfWVp55os8PlKw+eeIsB3IeVU1QLb3XKHyg8=
-k8s.io/apimachinery v0.31.9/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
-k8s.io/client-go v0.31.9 h1:SZr3xiDPdGwKeVR+jMYYubk1gJXA/go3obJeG/1Q/to=
-k8s.io/client-go v0.31.9/go.mod h1:ZwfOkKABRm2zSNR3s9OkADeyt0zhF9F78tJNupZM8zM=
+k8s.io/api v0.31.14 h1:xYn/S/WFJsksI7dk/5uBRd3Umm/D8W5g7sRnd4csotA=
+k8s.io/api v0.31.14/go.mod h1:K8fvRey4z73RAuxBZCma7WtY8WFvkViYhfFLCMT4xgA=
+k8s.io/apiextensions-apiserver v0.31.14 h1:1KupD0PyU7CgiT/PiZPSgZhTCL2KGwvXd1ejGcxjEfg=
+k8s.io/apiextensions-apiserver v0.31.14/go.mod h1:Odk14fSl/zaciI8DRUSPMSH74UXtz4gfinw7zY7YHvE=
+k8s.io/apimachinery v0.31.14 h1:/eMIwjv+GFm6A/sSGlB1NupBU6wTDPhEWsju0Fj69kY=
+k8s.io/apimachinery v0.31.14/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/client-go v0.31.14 h1:d4/G0xfksNIbMWH7ghjzOwC5bTAwQ20gABTjZw7fLlQ=
+k8s.io/client-go v0.31.14/go.mod h1:0uRpRB7r5QwtsbxEngZPkbcIVoNdAQAPIcopgiXjhQc=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a h1:zD1uj3Jf+mD4zmA7W+goE5TxDkI7OGJjBNBzq5fJtLA=
 k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc=
 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/cluster-api v1.9.8 h1:VtgUzUgiE16d3P/XP7tIwPgkRXkdLvVj055o7wIQpaI=
-sigs.k8s.io/cluster-api v1.9.8/go.mod h1:6N73nqXbB1qTD3Z7zJc5WsRBen35JOflBdP73f23M2g=
+sigs.k8s.io/cluster-api v1.9.11 h1:bWOKVdg9UDxCaLQE5E5nDKFpca63jlcgmSx2wun/2+Q=
+sigs.k8s.io/cluster-api v1.9.11/go.mod h1:7ieY929gex3urS4k9+s2hnA7OTLEpjftAjXV5hAVhsA=
 sigs.k8s.io/controller-runtime v0.19.7 h1:DLABZfMr20A+AwCZOHhcbcu+TqBXnJZaVBri9K3EO48=
 sigs.k8s.io/controller-runtime v0.19.7/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016 h1:kXv6kKdoEtedwuqMmkqhbkgvYKeycVbC8+iPCP9j5kQ=
 sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
 sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
 sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
 sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/openshift/vendor/github.com/Masterminds/semver/v3/.gitignore b/openshift/vendor/github.com/Masterminds/semver/v3/.gitignore
new file mode 100644
index 0000000000..6b061e6174
--- /dev/null
+++ b/openshift/vendor/github.com/Masterminds/semver/v3/.gitignore
@@ -0,0 +1 @@
+_fuzz/
\ No newline at end of file
diff --git a/openshift/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/openshift/vendor/github.com/Masterminds/semver/v3/.golangci.yml
new file mode 100644
index 0000000000..fbc6332592
--- /dev/null
+++ b/openshift/vendor/github.com/Masterminds/semver/v3/.golangci.yml
@@ -0,0 +1,27 @@
+run:
+  deadline: 2m
+
+linters:
+  disable-all: true
+  enable:
+    - misspell
+    - govet
+    - staticcheck
+    - errcheck
+    - unparam
+    - ineffassign
+    - nakedret
+    - gocyclo
+    - dupl
+    - goimports
+    - revive
+    - gosec
+    - gosimple
+    - typecheck
+    - unused
+
+linters-settings:
+  gofmt:
+    simplify: true
+  dupl:
+    threshold: 600
diff --git a/openshift/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/openshift/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
new file mode 100644
index 0000000000..fabe5e43dc
--- /dev/null
+++ b/openshift/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
@@ -0,0 +1,268 @@
+# Changelog
+
+## 3.4.0 (2025-06-27)
+
+### Added
+
+- #268: Added property to Constraints to include prereleases for Check and Validate
+
+### Changed
+
+- #263: Updated Go testing for 1.24, 1.23, and 1.22
+- #269: Updated the error message handling for message case and wrapping errors
+- #266: Restore the ability to have leading 0's when parsing with NewVersion.
+  Opt-out of this by setting CoerceNewVersion to false.
+
+### Fixed
+
+- #257: Fixed the CodeQL link (thanks @dmitris)
+- #262: Restored detailed errors when failed to parse with NewVersion. Opt-out
+  of this by setting DetailedNewVersionErrors to false for faster performance.
+- #267: Handle pre-releases for an "and" group if one constraint includes them
+
+## 3.3.1 (2024-11-19)
+
+### Fixed
+
+- #253: Fix for allowing some versions that were invalid
+
+## 3.3.0 (2024-08-27)
+
+### Added
+
+- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser)
+- #213: nil version equality checking (thanks @KnutZuidema)
+
+### Changed
+
+- #241: Simplify StrictNewVersion parsing (thanks @grosser)
+- Testing support up through Go 1.23
+- Minimum version set to 1.21 as this is what's tested now
+- Fuzz testing now supports caching
+
+## 3.2.1 (2023-04-10)
+
+### Changed
+
+- #198: Improved testing around pre-release names
+- #200: Improved code scanning with addition of CodeQL
+- #201: Testing now includes Go 1.20. Go 1.17 has been dropped
+- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily
+- #203: Docs updated for security details
+
+### Fixed
+
+- #199: Fixed issue with range transformations
+
+## 3.2.0 (2022-11-28)
+
+### Added
+
+- #190: Added text marshaling and unmarshaling
+- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg)
+- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker)
+- #179: Added New() version constructor (thanks @kazhuravlev)
+
+### Changed
+
+- #182/#183: Updated CI testing setup
+
+### Fixed
+
+- #186: Fixing issue where validation of constraint section gave false positives
+- #176: Fix constraints check with *-0 (thanks @mtt0)
+- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni)
+- #161: Fixed godoc (thanks @afirth)
+
+## 3.1.1 (2020-11-23)
+
+### Fixed
+
+- #158: Fixed issue with generated regex operation order that could cause problems
+
+## 3.1.0 (2020-04-15)
+
+### Added
+
+- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah)
+
+### Changed
+
+- #148: More accurate validation messages on constraints
+
+## 3.0.3 (2019-12-13)
+
+### Fixed
+
+- #141: Fixed issue with <= comparison
+
+## 3.0.2 (2019-11-14)
+
+### Fixed
+
+- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos)
+
+## 3.0.1 (2019-09-13)
+
+### Fixed
+
+- #125: Fixes issue with module path for v3
+
+## 3.0.0 (2019-09-12)
+
+This is a major release of the semver package which includes API changes. The Go
+API is compatible with ^1. The Go API was not changed because many people are using
+`go get` without Go modules for their applications and API breaking changes cause
+errors which we have or would need to support.
+
+The changes in this release are the handling based on the data passed into the
+functions. These are described in the added and changed sections below.
+
+### Added
+
+- StrictNewVersion function. This is similar to NewVersion but will return an
+  error if the version passed in is not a strict semantic version. For example,
+  1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly
+  speaking semantic versions. This function is faster, performs fewer operations,
+  and uses fewer allocations than NewVersion.
+- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint.
+  The Makefile contains the operations used. For more information you can start
+  on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing
+- Now using Go modules
+
+### Changed
+
+- NewVersion has proper prerelease and metadata validation with error messages
+  to signal an issue with either of them
+- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the
+  version is >=1 the ^ ranges works the same as v1. For major versions of 0 the
+  rules have changed. The minor version is treated as the stable version unless
+  a patch is specified and then it is equivalent to =. One difference from npm/js
+  is that prereleases there are only to a specific version (e.g. 1.2.3).
+  Prereleases here look over multiple versions and follow semantic version
+  ordering rules. This pattern now follows along with the expected and requested
+  handling of this package by numerous users.
+
+## 1.5.0 (2019-09-11)
+
+### Added
+
+- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
+
+### Changed
+
+- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
+- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
+- #72: Adding docs comment pointing to vert for a cli
+- #71: Update the docs on pre-release comparator handling
+- #89: Test with new go versions (thanks @thedevsaddam)
+- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
+
+### Fixed
+
+- #78: Fix unchecked error in example code (thanks @ravron)
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+- #97: Fixed copyright file for proper display on GitHub
+- #107: Fix handling prerelease when sorting alphanum and num
+- #109: Fixed where Validate sometimes returns wrong message on error
+
+## 1.4.2 (2018-04-10)
+
+### Changed
+
+- #72: Updated the docs to point to vert for a console application
+- #71: Update the docs on pre-release comparator handling
+
+### Fixed
+
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+
+## 1.4.1 (2018-04-02)
+
+### Fixed
+
+- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
+
+## 1.4.0 (2017-10-04)
+
+### Changed
+
+- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
+
+## 1.3.1 (2017-07-10)
+
+### Fixed
+
+- Fixed #57: number comparisons in prerelease sometimes inaccurate
+
+## 1.3.0 (2017-05-02)
+
+### Added
+
+- #45: Added json (un)marshaling support (thanks @mh-cbon)
+- Stability marker. See https://masterminds.github.io/stability/
+
+### Fixed
+
+- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
+
+### Changed
+
+- #55: The godoc icon moved from png to svg
+
+## 1.2.3 (2017-04-03)
+
+### Fixed
+
+- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
+
+## Release 1.2.2 (2016-12-13)
+
+### Fixed
+
+- #34: Fixed issue where hyphen range was not working with pre-release parsing.
+
+## Release 1.2.1 (2016-11-28)
+
+### Fixed
+
+- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
+  properly.
+
+## Release 1.2.0 (2016-11-04)
+
+### Added
+
+- #20: Added MustParse function for versions (thanks @adamreese)
+- #15: Added increment methods on versions (thanks @mh-cbon)
+
+### Fixed
+
+- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
+  might not satisfy the intended compatibility. The change here ignores pre-releases
+  on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
+  constraint. For example, `^1.2.3` will ignore pre-releases while
+  `^1.2.3-alpha` will include them.
+
+## Release 1.1.1 (2016-06-30)
+
+### Changed
+
+- Issue #9: Speed up version comparison performance (thanks @sdboyer)
+- Issue #8: Added benchmarks (thanks @sdboyer)
+- Updated Go Report Card URL to new location
+- Updated Readme to add code snippet formatting (thanks @mh-cbon)
+- Updating tagging to v[SemVer] structure for compatibility with other tools.
+
+## Release 1.1.0 (2016-03-11)
+
+- Issue #2: Implemented validation to provide reasons a version failed a
+  constraint.
+
+## Release 1.0.1 (2015-12-31)
+
+- Fixed #1: * constraint failing on valid versions.
+
+## Release 1.0.0 (2015-10-20)
+
+- Initial release
diff --git a/vendor/go.uber.org/automaxprocs/LICENSE b/openshift/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
similarity index 93%
rename from vendor/go.uber.org/automaxprocs/LICENSE
rename to openshift/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
index 20dcf51d96..9ff7da9c48 100644
--- a/vendor/go.uber.org/automaxprocs/LICENSE
+++ b/openshift/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2017 Uber Technologies, Inc.
+Copyright (C) 2014-2019, Matt Butcher and Matt Farina
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -16,4 +16,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
\ No newline at end of file
+THE SOFTWARE.
diff --git a/openshift/vendor/github.com/Masterminds/semver/v3/Makefile b/openshift/vendor/github.com/Masterminds/semver/v3/Makefile
new file mode 100644
index 0000000000..9ca87a2c79
--- /dev/null
+++ b/openshift/vendor/github.com/Masterminds/semver/v3/Makefile
@@ -0,0 +1,31 @@
+GOPATH=$(shell go env GOPATH)
+GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint
+
+.PHONY: lint
+lint: $(GOLANGCI_LINT)
+	@echo "==> Linting codebase"
+	@$(GOLANGCI_LINT) run
+
+.PHONY: test
+test:
+	@echo "==> Running tests"
+	GO111MODULE=on go test -v
+
+.PHONY: test-cover
+test-cover:
+	@echo "==> Running Tests with coverage"
+	GO111MODULE=on go test -cover .
+
+.PHONY: fuzz
+fuzz:
+	@echo "==> Running Fuzz Tests"
+	go env GOCACHE
+	go test -fuzz=FuzzNewVersion -fuzztime=15s .
+	go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
+	go test -fuzz=FuzzNewConstraint -fuzztime=15s .
+
+$(GOLANGCI_LINT):
+	# Install golangci-lint. The configuration for it is in the .golangci.yml
+	# file in the root of the repository
+	echo ${GOPATH}
+	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2
diff --git a/openshift/vendor/github.com/Masterminds/semver/v3/README.md b/openshift/vendor/github.com/Masterminds/semver/v3/README.md
new file mode 100644
index 0000000000..2f56c676a5
--- /dev/null
+++ b/openshift/vendor/github.com/Masterminds/semver/v3/README.md
@@ -0,0 +1,274 @@
+# SemVer
+
+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions)
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+## Package Versions
+
+Note, import `github.com/Masterminds/semver/v3` to use the latest version.
+
+There are three major versions of the `semver` package.
+
+* 3.x.x is the stable and active version. This version is focused on constraint
+  compatibility for range handling in other tools from other languages. It has
+  a similar API to the v1 releases. The development of this version is on the master
+  branch. The documentation for this version is below.
+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
+  no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
+  There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
+* 1.x.x is the original release. It is no longer maintained. You should use the
+  v3 release instead. You can read the documentation for the 1.x.x release
+  [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+
+## Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form.
+
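+For example, a short sketch of those accessors (the output is noted in comments):
+
+```go
+v, err := semver.NewVersion("v1.2")
+if err != nil {
+    // handle the parse error
+}
+fmt.Println(v.Major(), v.Minor(), v.Patch()) // 1 2 0
+fmt.Println(v.String())                      // 1.2.0
+fmt.Println(v.Original())                    // v1.2
+```
+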
+There are package level variables that affect how `NewVersion` handles parsing.
+
+- `CoerceNewVersion` is `true` by default. When set to `true` it coerces non-compliant
+  versions into SemVer. For example, allowing a leading 0 in a major, minor, or patch
+  part. This enables the use of CalVer in versions even when not compliant with SemVer.
+  When set to `false` less coercion work is done.
+- `DetailedNewVersionErrors` provides more detailed errors. It only has an effect when
+  `CoerceNewVersion` is set to `false`. When `DetailedNewVersionErrors` is set to `true`
+  it can provide more insight into why a version is invalid. Setting
+  `DetailedNewVersionErrors` to `false` improves performance but provides less
+  detailed error messages if a version fails to parse.
+
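+For example, a minimal sketch of opting out of coercion via those variables:
+
+```go
+semver.CoerceNewVersion = false
+semver.DetailedNewVersionErrors = true
+
+// A leading zero is no longer coerced away, so parsing now fails.
+if _, err := semver.NewVersion("01.2.3"); err != nil {
+    fmt.Println("invalid version:", err)
+}
+```
+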
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+    v, err := semver.NewVersion(r)
+    if err != nil {
+        log.Fatalf("error parsing version %q: %s", r, err)
+    }
+
+    vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+   and others it will follow the specification and always include pre-releases
+   within the comparison. It will provide an answer that is valid with the
+   comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+   different set of rules that are common for ranges with tools like npm/js
+   and Rust/Cargo. This includes considering pre-releases to be invalid if the
+   range does not include one. If you want to have it include pre-releases a
+   simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+   ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patters with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+    // Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parsable.
+}
+// Check if the version meets the constraints. The variable a will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
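+
+For example, a quick sketch of an OR'd range (error handling elided):
+
+```go
+c, _ := semver.NewConstraint(">= 1.2, < 3.0.0 || >= 4.2.3")
+
+c.Check(semver.MustParse("2.5.0")) // true: satisfies the first AND group
+c.Check(semver.MustParse("4.3.0")) // true: satisfies the second group
+c.Check(semver.MustParse("3.1.0")) // false: matches neither group
+```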
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
+### Working With Prerelease Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, pre-releases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification, pre-releases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer's comparisons using constraints without a pre-release comparator will skip
+pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
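+
+A brief sketch of the difference (error handling elided):
+
+```go
+release, _ := semver.NewConstraint(">= 1.2.3")
+release.Check(semver.MustParse("1.3.0-alpha.1")) // false: prereleases are skipped
+
+pre, _ := semver.NewConstraint(">= 1.2.3-0")
+pre.Check(semver.MustParse("1.3.0-alpha.1")) // true: -0 opts the range in to prereleases
+```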
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order
+(see an [ASCII Table](http://www.asciitable.com/)).
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. The case-insensitive ordering
+you might expect doesn't apply here; ASCII sort ordering is what the spec
+specifies.
+
+The `Constraints` instance returned from `semver.NewConstraint()` has a field
+`IncludePrerelease` that, when set to true, causes `Check()` and `Validate()`
+to consider prerelease versions.
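+
+For example, a short sketch (error handling elided):
+
+```go
+c, _ := semver.NewConstraint(">= 1.2.3")
+c.IncludePrerelease = true
+
+c.Check(semver.MustParse("1.3.0-beta.1")) // true: prereleases are now evaluated
+```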
+
+### Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
+parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
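+
+For example (error handling elided):
+
+```go
+c, _ := semver.NewConstraint("1.2 - 1.4.5") // rewritten to ">= 1.2, <= 1.4.5"
+
+c.Check(semver.MustParse("1.4.0")) // true
+c.Check(semver.MustParse("1.5.0")) // false
+```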
+
+### Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
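+
+For example (error handling elided):
+
+```go
+c, _ := semver.NewConstraint("1.2.x") // >= 1.2.0, < 1.3.0
+
+c.Check(semver.MustParse("1.2.9")) // true
+c.Check(semver.MustParse("1.3.0")) // false
+```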
+
+### Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
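+
+For example (error handling elided):
+
+```go
+c, _ := semver.NewConstraint("~1.2.3") // >= 1.2.3, < 1.3.0
+
+c.Check(semver.MustParse("1.2.4")) // true
+c.Check(semver.MustParse("1.3.0")) // false
+```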
+
+### Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+* `^0` is equivalent to `>=0.0.0 <1.0.0`
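+
+For example (error handling elided):
+
+```go
+c, _ := semver.NewConstraint("^0.2.3") // >=0.2.3 <0.3.0
+
+c.Check(semver.MustParse("0.2.9")) // true
+c.Check(semver.MustParse("0.3.0")) // false: before 1.0.0 a minor bump is breaking
+```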
+
+## Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+if err != nil {
+    // Handle constraint not being parseable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parseable.
+}
+
+// Validate a version against a constraint.
+a, msgs := c.Validate(v)
+// a is false
+for _, m := range msgs {
+    fmt.Println(m)
+
+    // Loops over the errors which would read
+    // "1.3 is greater than 1.2.3"
+    // "1.3 is less than 1.4"
+}
+```
+
+## Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
+
+## Security
+
+Security is an important consideration for this project. The project currently
+uses the following tools to help discover security issues:
+
+* [CodeQL](https://codeql.github.com)
+* [gosec](https://github.com/securego/gosec)
+* Daily Fuzz testing
+
+If you believe you have found a security vulnerability you can privately disclose
+it through the [GitHub security page](https://github.com/Masterminds/semver/security).
diff --git a/openshift/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/openshift/vendor/github.com/Masterminds/semver/v3/SECURITY.md
new file mode 100644
index 0000000000..a30a66b1f7
--- /dev/null
+++ b/openshift/vendor/github.com/Masterminds/semver/v3/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+The following versions of semver are currently supported:
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 3.x     | :white_check_mark: |
+| 2.x     | :x:                |
+| 1.x     | :x:                |
+
+Fixes are only released for the latest minor version in the form of a patch release.
+
+## Reporting a Vulnerability
+
+You can privately disclose a vulnerability through GitHub's
+[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories)
+mechanism.
diff --git a/openshift/vendor/github.com/Masterminds/semver/v3/collection.go b/openshift/vendor/github.com/Masterminds/semver/v3/collection.go
new file mode 100644
index 0000000000..a78235895f
--- /dev/null
+++ b/openshift/vendor/github.com/Masterminds/semver/v3/collection.go
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection, i.e. the number of Version
+// instances in the slice.
+func (c Collection) Len() int {
+	return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects in
+// the slice. It checks if one is less than the other.
+func (c Collection) Less(i, j int) bool {
+	return c[i].LessThan(c[j])
+}
+
+// Swap is needed for the sort interface to swap the Version objects
+// at two different positions in the slice.
+func (c Collection) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
diff --git a/openshift/vendor/github.com/Masterminds/semver/v3/constraints.go b/openshift/vendor/github.com/Masterminds/semver/v3/constraints.go
new file mode 100644
index 0000000000..8b7a10f836
--- /dev/null
+++ b/openshift/vendor/github.com/Masterminds/semver/v3/constraints.go
@@ -0,0 +1,607 @@
+package semver
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// Constraints represents one or more constraints that a semantic version can be
+// checked against.
+type Constraints struct {
+	constraints [][]*constraint
+	containsPre []bool
+
+	// IncludePrerelease specifies if pre-releases should be included in
+	// the results. Note, if a constraint range has a prerelease then
+	// prereleases will be included for that AND group even if this is
+	// set to false.
+	IncludePrerelease bool
+}
+
+// NewConstraint returns a Constraints instance that a Version instance can
+// be checked against. If there is a parse error it will be returned.
+func NewConstraint(c string) (*Constraints, error) {
+
+	// Rewrite - ranges into a comparison operation.
+	c = rewriteRange(c)
+
+	ors := strings.Split(c, "||")
+	lenors := len(ors)
+	or := make([][]*constraint, lenors)
+	hasPre := make([]bool, lenors)
+	for k, v := range ors {
+		// Validate the segment
+		if !validConstraintRegex.MatchString(v) {
+			return nil, fmt.Errorf("improper constraint: %s", v)
+		}
+
+		cs := findConstraintRegex.FindAllString(v, -1)
+		if cs == nil {
+			cs = append(cs, v)
+		}
+		result := make([]*constraint, len(cs))
+		for i, s := range cs {
+			pc, err := parseConstraint(s)
+			if err != nil {
+				return nil, err
+			}
+
+			// If one of the constraints has a prerelease record this.
+			// This information is used when checking all in an "and"
+			// group to ensure they all check for prereleases.
+			if pc.con.pre != "" {
+				hasPre[k] = true
+			}
+
+			result[i] = pc
+		}
+		or[k] = result
+	}
+
+	o := &Constraints{
+		constraints: or,
+		containsPre: hasPre,
+	}
+	return o, nil
+}
+
+// Check tests if a version satisfies the constraints.
+func (cs Constraints) Check(v *Version) bool {
+	// TODO(mattfarina): For v4 of this library consolidate the Check and Validate
+	// functions as the underlying functions make that possible now.
+	// loop over the ORs and check the inner ANDs
+	for i, o := range cs.constraints {
+		joy := true
+		for _, c := range o {
+			if check, _ := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); !check {
+				joy = false
+				break
+			}
+		}
+
+		if joy {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Validate checks if a version satisfies a constraint. If not a slice of
+// reasons for the failure are returned in addition to a bool.
+func (cs Constraints) Validate(v *Version) (bool, []error) {
+	// loop over the ORs and check the inner ANDs
+	var e []error
+
+	// Capture the prerelease message only once. When it happens the first time
+	// this var is marked.
+	var prerelease bool
+	for i, o := range cs.constraints {
+		joy := true
+		for _, c := range o {
+			// Before running the check handle the case there the version is
+			// a prerelease and the check is not searching for prereleases.
+			if !(cs.IncludePrerelease || cs.containsPre[i]) && v.pre != "" {
+				if !prerelease {
+					em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+					e = append(e, em)
+					prerelease = true
+				}
+				joy = false
+
+			} else {
+
+				if _, err := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); err != nil {
+					e = append(e, err)
+					joy = false
+				}
+			}
+		}
+
+		if joy {
+			return true, []error{}
+		}
+	}
+
+	return false, e
+}
+
+func (cs Constraints) String() string {
+	buf := make([]string, len(cs.constraints))
+	var tmp bytes.Buffer
+
+	for k, v := range cs.constraints {
+		tmp.Reset()
+		vlen := len(v)
+		for kk, c := range v {
+			tmp.WriteString(c.string())
+
+			// Space separate the AND conditions
+			if vlen > 1 && kk < vlen-1 {
+				tmp.WriteString(" ")
+			}
+		}
+		buf[k] = tmp.String()
+	}
+
+	return strings.Join(buf, " || ")
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (cs *Constraints) UnmarshalText(text []byte) error {
+	temp, err := NewConstraint(string(text))
+	if err != nil {
+		return err
+	}
+
+	*cs = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (cs Constraints) MarshalText() ([]byte, error) {
+	return []byte(cs.String()), nil
+}
+
+var constraintOps map[string]cfunc
+var constraintRegex *regexp.Regexp
+var constraintRangeRegex *regexp.Regexp
+
+// Used to find individual constraints within a multi-constraint string
+var findConstraintRegex *regexp.Regexp
+
+// Used to validate that a segment of ANDs is valid
+var validConstraintRegex *regexp.Regexp
+
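+// cvRegex matches a single version within a constraint. It permits an
+// optional leading v, wildcards (x, X, *) in the number parts, and optional
+// prerelease and metadata sections.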
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+func init() {
+	constraintOps = map[string]cfunc{
+		"":   constraintTildeOrEqual,
+		"=":  constraintTildeOrEqual,
+		"!=": constraintNotEqual,
+		">":  constraintGreaterThan,
+		"<":  constraintLessThan,
+		">=": constraintGreaterThanEqual,
+		"=>": constraintGreaterThanEqual,
+		"<=": constraintLessThanEqual,
+		"=<": constraintLessThanEqual,
+		"~":  constraintTilde,
+		"~>": constraintTilde,
+		"^":  constraintCaret,
+	}
+
+	ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
+
+	constraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`^\s*(%s)\s*(%s)\s*$`,
+		ops,
+		cvRegex))
+
+	constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
+		`\s*(%s)\s+-\s+(%s)\s*`,
+		cvRegex, cvRegex))
+
+	findConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`(%s)\s*(%s)`,
+		ops,
+		cvRegex))
+
+	// The first constraint in a string looks slightly different from those
+	// that follow it, because subsequent constraints are preceded by a
+	// space or comma in the given string.
+	validConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`,
+		ops,
+		cvRegex,
+		ops,
+		cvRegex))
+}
+
+// An individual constraint
+type constraint struct {
+	// The version used in the constraint check. For example, if a constraint
+	// is '<= 2.0.0' the con field holds a version instance representing 2.0.0.
+	con *Version
+
+	// The original parsed version (e.g., 4.x from != 4.x)
+	orig string
+
+	// The original operator for the constraint
+	origfunc string
+
+	// When an x is used as part of the version (e.g., 1.x)
+	minorDirty bool
+	dirty      bool
+	patchDirty bool
+}
+
+// Check if a version meets the constraint
+func (c *constraint) check(v *Version, includePre bool) (bool, error) {
+	return constraintOps[c.origfunc](v, c, includePre)
+}
+
+// String prints an individual constraint into a string
+func (c *constraint) string() string {
+	return c.origfunc + c.orig
+}
+
+type cfunc func(v *Version, c *constraint, includePre bool) (bool, error)
+
+func parseConstraint(c string) (*constraint, error) {
+	if len(c) > 0 {
+		m := constraintRegex.FindStringSubmatch(c)
+		if m == nil {
+			return nil, fmt.Errorf("improper constraint: %s", c)
+		}
+
+		cs := &constraint{
+			orig:     m[2],
+			origfunc: m[1],
+		}
+
+		ver := m[2]
+		minorDirty := false
+		patchDirty := false
+		dirty := false
+		if isX(m[3]) || m[3] == "" {
+			ver = fmt.Sprintf("0.0.0%s", m[6])
+			dirty = true
+		} else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
+			minorDirty = true
+			dirty = true
+			ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
+		} else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" {
+			dirty = true
+			patchDirty = true
+			ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
+		}
+
+		con, err := NewVersion(ver)
+		if err != nil {
+
+			// The constraintRegex should catch any regex parsing errors. So,
+			// we should never get here.
+			return nil, errors.New("constraint parser error")
+		}
+
+		cs.con = con
+		cs.minorDirty = minorDirty
+		cs.patchDirty = patchDirty
+		cs.dirty = dirty
+
+		return cs, nil
+	}
+
+	// The rest is the special case where an empty string was passed in which
+	// is equivalent to * or >=0.0.0
+	con, err := StrictNewVersion("0.0.0")
+	if err != nil {
+
+		// The constraintRegex should catch any regex parsing errors. So,
+		// we should never get here.
+		return nil, errors.New("constraint parser error")
+	}
+
+	cs := &constraint{
+		con:        con,
+		orig:       c,
+		origfunc:   "",
+		minorDirty: false,
+		patchDirty: false,
+		dirty:      true,
+	}
+	return cs, nil
+}
+
+// Constraint functions
+func constraintNotEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+	// The existence of prereleases is checked at the group level and passed in.
+	// Exit early if the version has a prerelease but those are to be ignored.
+	if v.Prerelease() != "" && !includePre {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	if c.dirty {
+		if c.con.Major() != v.Major() {
+			return true, nil
+		}
+		if c.con.Minor() != v.Minor() && !c.minorDirty {
+			return true, nil
+		} else if c.minorDirty {
+			return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+		} else if c.con.Patch() != v.Patch() && !c.patchDirty {
+			return true, nil
+		} else if c.patchDirty {
+			// Need to handle prereleases if present
+			if v.Prerelease() != "" || c.con.Prerelease() != "" {
+				eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0
+				if eq {
+					return true, nil
+				}
+				return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+			}
+			return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+		}
+	}
+
+	eq := v.Equal(c.con)
+	if eq {
+		return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+func constraintGreaterThan(v *Version, c *constraint, includePre bool) (bool, error) {
+
+	// The existence of prereleases is checked at the group level and passed in.
+	// Exit early if the version has a prerelease but those are to be ignored.
+	if v.Prerelease() != "" && !includePre {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	var eq bool
+
+	if !c.dirty {
+		eq = v.Compare(c.con) == 1
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	}
+
+	if v.Major() > c.con.Major() {
+		return true, nil
+	} else if v.Major() < c.con.Major() {
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	} else if c.minorDirty {
+		// This is a range case such as >11. When the version is something like
+		// 11.1.0 it is not > 11. For that we would need 12 or higher.
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	} else if c.patchDirty {
+		// This is for ranges such as >11.1. A version of 11.1.1 is not greater
+		// while one of 11.2.1 is.
+		eq = v.Minor() > c.con.Minor()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	}
+
+	// If we have gotten here we are not comparing pre-releases and can use the
+	// Compare function to accomplish that.
+	eq = v.Compare(c.con) == 1
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+}
+
+func constraintLessThan(v *Version, c *constraint, includePre bool) (bool, error) {
+	// The existence of prereleases is checked at the group level and passed in.
+	// Exit early if the version has a prerelease but those are to be ignored.
+	if v.Prerelease() != "" && !includePre {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	eq := v.Compare(c.con) < 0
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
+}
+
+func constraintGreaterThanEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+
+	// The existence of prereleases is checked at the group level and passed in.
+	// Exit early if the version has a prerelease but those are to be ignored.
+	if v.Prerelease() != "" && !includePre {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	eq := v.Compare(c.con) >= 0
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is less than %s", v, c.orig)
+}
+
+func constraintLessThanEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+	// The existence of prereleases is checked at the group level and passed in.
+	// Exit early if the version has a prerelease but those are to be ignored.
+	if v.Prerelease() != "" && !includePre {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	var eq bool
+
+	if !c.dirty {
+		eq = v.Compare(c.con) <= 0
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+	}
+
+	if v.Major() > c.con.Major() {
+		return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+	} else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty {
+		return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+// ~*, ~>* --> >= 0.0.0 (any)
+// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x, ~>2.x.x --> >=2.0.0, <3.0.0
+// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
+// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
+// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
+// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
+func constraintTilde(v *Version, c *constraint, includePre bool) (bool, error) {
+	// The existence of prereleases is checked at the group level and passed in.
+	// Exit early if the version has a prerelease but those are to be ignored.
+	if v.Prerelease() != "" && !includePre {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	if v.LessThan(c.con) {
+		return false, fmt.Errorf("%s is less than %s", v, c.orig)
+	}
+
+	// ~0.0.0 is a special case where all versions are accepted. It's
+	// equivalent to >= 0.0.0.
+	if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 &&
+		!c.minorDirty && !c.patchDirty {
+		return true, nil
+	}
+
+	if v.Major() != c.con.Major() {
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+
+	if v.Minor() != c.con.Minor() && !c.minorDirty {
+		return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
+// it's a straight =
+func constraintTildeOrEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+	// The existence of prereleases is checked at the group level and passed in.
+	// Exit early if the version has a prerelease but those are to be ignored.
+	if v.Prerelease() != "" && !includePre {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	if c.dirty {
+		return constraintTilde(v, c, includePre)
+	}
+
+	eq := v.Equal(c.con)
+	if eq {
+		return true, nil
+	}
+
+	return false, fmt.Errorf("%s is not equal to %s", v, c.orig)
+}
+
+// ^*      -->  (any)
+// ^1.2.3  -->  >=1.2.3 <2.0.0
+// ^1.2    -->  >=1.2.0 <2.0.0
+// ^1      -->  >=1.0.0 <2.0.0
+// ^0.2.3  -->  >=0.2.3 <0.3.0
+// ^0.2    -->  >=0.2.0 <0.3.0
+// ^0.0.3  -->  >=0.0.3 <0.0.4
+// ^0.0    -->  >=0.0.0 <0.1.0
+// ^0      -->  >=0.0.0 <1.0.0
+func constraintCaret(v *Version, c *constraint, includePre bool) (bool, error) {
+	// The existence of prereleases is checked at the group level and passed in.
+	// Exit early if the version has a prerelease but those are to be ignored.
+	if v.Prerelease() != "" && !includePre {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	// This less than handles prereleases
+	if v.LessThan(c.con) {
+		return false, fmt.Errorf("%s is less than %s", v, c.orig)
+	}
+
+	var eq bool
+
+	// ^ when the major > 0 is >=x.y.z < x+1
+	if c.con.Major() > 0 || c.minorDirty {
+
+		// ^ has to be within a major range for > 0. Everything less than was
+		// filtered out with the LessThan call above. This filters out those
+		// that are greater but not within the same major range.
+		eq = v.Major() == c.con.Major()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+
+	// ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1
+	if c.con.Major() == 0 && v.Major() > 0 {
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+	// If the con Minor is > 0 it is not dirty
+	if c.con.Minor() > 0 || c.patchDirty {
+		eq = v.Minor() == c.con.Minor()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
+	}
+	// ^ when the constraint minor is 0 is =0.0.z, so a version with minor > 0 fails
+	if c.con.Minor() == 0 && v.Minor() > 0 {
+		return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig)
+	}
+
+	// At this point major and minor are 0 and not dirty. The patch is not
+	// dirty either, so the version's patch must exactly equal the constraint's.
+	eq = c.con.Patch() == v.Patch()
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig)
+}
+
+func isX(x string) bool {
+	switch x {
+	case "x", "*", "X":
+		return true
+	default:
+		return false
+	}
+}
+
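+// rewriteRange rewrites hyphen ranges (e.g., "1.2 - 1.4.5") into explicit
+// comparisons (e.g., ">= 1.2, <= 1.4.5") so the rest of parsing only deals
+// with operator-prefixed constraints.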
+func rewriteRange(i string) string {
+	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+	if m == nil {
+		return i
+	}
+	o := i
+	for _, v := range m {
+		t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11])
+		o = strings.Replace(o, v[0], t, 1)
+	}
+
+	return o
+}
diff --git a/openshift/vendor/github.com/Masterminds/semver/v3/doc.go b/openshift/vendor/github.com/Masterminds/semver/v3/doc.go
new file mode 100644
index 0000000000..74f97caa57
--- /dev/null
+++ b/openshift/vendor/github.com/Masterminds/semver/v3/doc.go
@@ -0,0 +1,184 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+  - Parse semantic versions
+  - Sort semantic versions
+  - Check if a semantic version fits within a set of constraints
+  - Optionally work with a `v` prefix
+
+# Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+	v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+# Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+	raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+	vs := make([]*semver.Version, len(raw))
+	for i, r := range raw {
+		v, err := semver.NewVersion(r)
+		if err != nil {
+			// Handle the version failing to parse.
+		}
+
+		vs[i] = v
+	}
+
+	sort.Sort(semver.Collection(vs))
+
+# Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses Constraints. There are some important
+differences to note between these two methods of comparison.
+
+ 1. When two versions are compared using functions such as `Compare`, `LessThan`,
+    and others it will follow the specification and always include prereleases
+    within the comparison. It will provide an answer valid with the comparison
+    spec section at https://semver.org/#spec-item-11
+ 2. When constraint checking is used for checks or validation it will follow a
+    different set of rules that are common for ranges with tools like npm/js
+    and Rust/Cargo. This includes considering prereleases to be invalid if the
+    range does not include one. If you want it to include pre-releases a
+    simple solution is to include `-0` in your range.
+ 3. Constraint ranges can have some complex rules including the shorthand use of
+    ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+	c, err := semver.NewConstraint(">= 1.2.3")
+	if err != nil {
+	    // Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+	    // Handle version not being parsable.
+	}
+	// Check if the version meets the constraints. The variable a will be true.
+	a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`.
+
+The basic comparisons are:
+
+  - `=`: equal (aliased to no operator)
+  - `!=`: not equal
+  - `>`: greater than
+  - `<`: less than
+  - `>=`: greater than or equal to
+  - `<=`: less than or equal to
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphens ranges.
+These look like:
+
+  - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+  - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+  - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `>= 1.2.x` is equivalent to `>= 1.2.0`
+  - `<= 2.x` is equivalent to `< 3`
+  - `*` is equivalent to `>= 0.0.0`
+
+# Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+  - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+  - `~1` is equivalent to `>= 1, < 2`
+  - `~2.3` is equivalent to `>= 2.3 < 2.4`
+  - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `~1.x` is equivalent to `>= 1 < 2`
+
+# Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking. For example,
+
+  - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+  - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+  - `^2.3` is equivalent to `>= 2.3, < 3`
+  - `^2.x` is equivalent to `>= 2.0.0, < 3`
+  - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+  - `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+  - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+  - `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+  - `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+	c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+	if err != nil {
+	    // Handle constraint not being parseable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+	    // Handle version not being parseable.
+	}
+
+	// Validate a version against a constraint.
+	a, msgs := c.Validate(v)
+	// a is false
+	for _, m := range msgs {
+	    fmt.Println(m)
+
+	    // Loops over the errors which would read
+	    // "1.3 is greater than 1.2.3"
+	    // "1.3 is less than 1.4"
+	}
+*/
+package semver
diff --git a/openshift/vendor/github.com/Masterminds/semver/v3/version.go b/openshift/vendor/github.com/Masterminds/semver/v3/version.go
new file mode 100644
index 0000000000..7a3ba73887
--- /dev/null
+++ b/openshift/vendor/github.com/Masterminds/semver/v3/version.go
@@ -0,0 +1,788 @@
+package semver
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+var looseVersionRegex *regexp.Regexp
+
+// CoerceNewVersion sets if leading 0's are allowed in the version part. Leading 0's are
+// not allowed in a valid semantic version. When set to true, NewVersion will coerce
+// leading 0's into a valid version.
+var CoerceNewVersion = true
+
+// DetailedNewVersionErrors specifies if detailed errors are returned from the NewVersion
+// function. This is used when CoerceNewVersion is set to false. If set to false
+// ErrInvalidSemVer is returned for an invalid version. This does not apply to
+// StrictNewVersion. Setting this variable to false returns errors more quickly.
+var DetailedNewVersionErrors = true
+
+var (
+	// ErrInvalidSemVer is returned when a version is found to be invalid
+	// during parsing.
+	ErrInvalidSemVer = errors.New("invalid semantic version")
+
+	// ErrEmptyString is returned when an empty string is passed in for parsing.
+	ErrEmptyString = errors.New("version string empty")
+
+	// ErrInvalidCharacters is returned when invalid characters are found as
+	// part of a version
+	ErrInvalidCharacters = errors.New("invalid characters in version")
+
+	// ErrSegmentStartsZero is returned when a version segment starts with 0.
+	// This is invalid in SemVer.
+	ErrSegmentStartsZero = errors.New("version segment starts with 0")
+
+	// ErrInvalidMetadata is returned when the metadata is an invalid format
+	ErrInvalidMetadata = errors.New("invalid metadata string")
+
+	// ErrInvalidPrerelease is returned when the pre-release is an invalid format
+	ErrInvalidPrerelease = errors.New("invalid prerelease string")
+)
+
+// semVerRegex is the regular expression used to parse a semantic version.
+// This is not the official regex from the semver spec. It has been modified to allow for loose handling
+// where versions like 2.1 are detected.
+const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
+	`(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
+	`(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
+
+// looseSemVerRegex is a regular expression that lets invalid semver expressions through
+// with enough detail that certain errors can be checked for.
+const looseSemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+// Version represents a single semantic version.
+type Version struct {
+	major, minor, patch uint64
+	pre                 string
+	metadata            string
+	original            string
+}
+
+func init() {
+	versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
+	looseVersionRegex = regexp.MustCompile("^" + looseSemVerRegex + "$")
+}
+
+const (
+	num     string = "0123456789"
+	allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num
+)
+
+// StrictNewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. Only parses valid semantic versions.
+// Performs checking that can find errors within the version.
+// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x
+// releases of semver did, use the NewVersion() function.
+func StrictNewVersion(v string) (*Version, error) {
+	// Parsing here does not use RegEx in order to increase performance and reduce
+	// allocations.
+
+	if len(v) == 0 {
+		return nil, ErrEmptyString
+	}
+
+	// Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build
+	parts := strings.SplitN(v, ".", 3)
+	if len(parts) != 3 {
+		return nil, ErrInvalidSemVer
+	}
+
+	sv := &Version{
+		original: v,
+	}
+
+	// Extract build metadata
+	if strings.Contains(parts[2], "+") {
+		extra := strings.SplitN(parts[2], "+", 2)
+		sv.metadata = extra[1]
+		parts[2] = extra[0]
+		if err := validateMetadata(sv.metadata); err != nil {
+			return nil, err
+		}
+	}
+
+	// Extract build prerelease
+	if strings.Contains(parts[2], "-") {
+		extra := strings.SplitN(parts[2], "-", 2)
+		sv.pre = extra[1]
+		parts[2] = extra[0]
+		if err := validatePrerelease(sv.pre); err != nil {
+			return nil, err
+		}
+	}
+
+	// Validate the number segments are valid. This includes only having positive
+	// numbers and no leading 0's.
+	for _, p := range parts {
+		if !containsOnly(p, num) {
+			return nil, ErrInvalidCharacters
+		}
+
+		if len(p) > 1 && p[0] == '0' {
+			return nil, ErrSegmentStartsZero
+		}
+	}
+
+	// Extract major, minor, and patch
+	var err error
+	sv.major, err = strconv.ParseUint(parts[0], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	sv.minor, err = strconv.ParseUint(parts[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	sv.patch, err = strconv.ParseUint(parts[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return sv, nil
+}
+
+// NewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. If the version is SemVer-ish it
+// attempts to convert it to SemVer. If you want to validate it was a strict
+// semantic version at parse time see StrictNewVersion().
+func NewVersion(v string) (*Version, error) {
+	if CoerceNewVersion {
+		return coerceNewVersion(v)
+	}
+	m := versionRegex.FindStringSubmatch(v)
+	if m == nil {
+
+		// Disabling detailed errors is first so that it is in the fast path.
+		if !DetailedNewVersionErrors {
+			return nil, ErrInvalidSemVer
+		}
+
+		// Check for specific errors with the semver string and return a more detailed
+		// error.
+		m = looseVersionRegex.FindStringSubmatch(v)
+		if m == nil {
+			return nil, ErrInvalidSemVer
+		}
+		err := validateVersion(m)
+		if err != nil {
+			return nil, err
+		}
+		return nil, ErrInvalidSemVer
+	}
+
+	sv := &Version{
+		metadata: m[5],
+		pre:      m[4],
+		original: v,
+	}
+
+	var err error
+	sv.major, err = strconv.ParseUint(m[1], 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing version segment: %w", err)
+	}
+
+	if m[2] != "" {
+		sv.minor, err = strconv.ParseUint(m[2], 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing version segment: %w", err)
+		}
+	} else {
+		sv.minor = 0
+	}
+
+	if m[3] != "" {
+		sv.patch, err = strconv.ParseUint(m[3], 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing version segment: %w", err)
+		}
+	} else {
+		sv.patch = 0
+	}
+
+	// Perform some basic due diligence on the extra parts to ensure they are
+	// valid.
+
+	if sv.pre != "" {
+		if err = validatePrerelease(sv.pre); err != nil {
+			return nil, err
+		}
+	}
+
+	if sv.metadata != "" {
+		if err = validateMetadata(sv.metadata); err != nil {
+			return nil, err
+		}
+	}
+
+	return sv, nil
+}
+
+func coerceNewVersion(v string) (*Version, error) {
+	m := looseVersionRegex.FindStringSubmatch(v)
+	if m == nil {
+		return nil, ErrInvalidSemVer
+	}
+
+	sv := &Version{
+		metadata: m[8],
+		pre:      m[5],
+		original: v,
+	}
+
+	var err error
+	sv.major, err = strconv.ParseUint(m[1], 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing version segment: %w", err)
+	}
+
+	if m[2] != "" {
+		sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing version segment: %w", err)
+		}
+	} else {
+		sv.minor = 0
+	}
+
+	if m[3] != "" {
+		sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing version segment: %w", err)
+		}
+	} else {
+		sv.patch = 0
+	}
+
+	// Perform some basic due diligence on the extra parts to ensure they are
+	// valid.
+
+	if sv.pre != "" {
+		if err = validatePrerelease(sv.pre); err != nil {
+			return nil, err
+		}
+	}
+
+	if sv.metadata != "" {
+		if err = validateMetadata(sv.metadata); err != nil {
+			return nil, err
+		}
+	}
+
+	return sv, nil
+}
+
+// New creates a new instance of Version with each of the parts passed in as
+// arguments instead of parsing a version string.
+func New(major, minor, patch uint64, pre, metadata string) *Version {
+	v := Version{
+		major:    major,
+		minor:    minor,
+		patch:    patch,
+		pre:      pre,
+		metadata: metadata,
+		original: "",
+	}
+
+	v.original = v.String()
+
+	return &v
+}
+
+// MustParse parses a given version and panics on error.
+func MustParse(v string) *Version {
+	sv, err := NewVersion(v)
+	if err != nil {
+		panic(err)
+	}
+	return sv
+}
+
+// String converts a Version object to a string.
+// Note, if the original version contained a leading v this version will not.
+// See the Original() method to retrieve the original value. Semantic Versions
+// don't contain a leading v per the spec. Instead it's optional for
+// implementations.
+func (v Version) String() string {
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch)
+	if v.pre != "" {
+		fmt.Fprintf(&buf, "-%s", v.pre)
+	}
+	if v.metadata != "" {
+		fmt.Fprintf(&buf, "+%s", v.metadata)
+	}
+
+	return buf.String()
+}
+
+// Original returns the original value passed in to be parsed.
+func (v *Version) Original() string {
+	return v.original
+}
+
+// Major returns the major version.
+func (v Version) Major() uint64 {
+	return v.major
+}
+
+// Minor returns the minor version.
+func (v Version) Minor() uint64 {
+	return v.minor
+}
+
+// Patch returns the patch version.
+func (v Version) Patch() uint64 {
+	return v.patch
+}
+
+// Prerelease returns the pre-release version.
+func (v Version) Prerelease() string {
+	return v.pre
+}
+
+// Metadata returns the metadata on the version.
+func (v Version) Metadata() string {
+	return v.metadata
+}
+
+// originalVPrefix returns the original 'v' prefix if any.
+func (v Version) originalVPrefix() string {
+	// Note, only lowercase v is supported as a prefix by the parser.
+	if v.original != "" && v.original[:1] == "v" {
+		return v.original[:1]
+	}
+	return ""
+}
+
+// IncPatch produces the next patch version.
+// If the current version does not have prerelease/metadata information,
+// it unsets metadata and prerelease values, increments patch number.
+// If the current version has any of prerelease or metadata information,
+// it unsets both values and keeps current patch value.
+func (v Version) IncPatch() Version {
+	vNext := v
+	// according to http://semver.org/#spec-item-9
+	// Pre-release versions have a lower precedence than the associated normal version.
+	// according to http://semver.org/#spec-item-10
+	// Build metadata SHOULD be ignored when determining version precedence.
+	if v.pre != "" {
+		vNext.metadata = ""
+		vNext.pre = ""
+	} else {
+		vNext.metadata = ""
+		vNext.pre = ""
+		vNext.patch = v.patch + 1
+	}
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// IncMinor produces the next minor version.
+// Sets patch to 0.
+// Increments minor number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMinor() Version {
+	vNext := v
+	vNext.metadata = ""
+	vNext.pre = ""
+	vNext.patch = 0
+	vNext.minor = v.minor + 1
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// IncMajor produces the next major version.
+// Sets patch to 0.
+// Sets minor to 0.
+// Increments major number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMajor() Version {
+	vNext := v
+	vNext.metadata = ""
+	vNext.pre = ""
+	vNext.patch = 0
+	vNext.minor = 0
+	vNext.major = v.major + 1
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// SetPrerelease defines the prerelease value.
+// Value must not include the required 'hyphen' prefix.
+func (v Version) SetPrerelease(prerelease string) (Version, error) {
+	vNext := v
+	if len(prerelease) > 0 {
+		if err := validatePrerelease(prerelease); err != nil {
+			return vNext, err
+		}
+	}
+	vNext.pre = prerelease
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// SetMetadata defines metadata value.
+// Value must not include the required 'plus' prefix.
+func (v Version) SetMetadata(metadata string) (Version, error) {
+	vNext := v
+	if len(metadata) > 0 {
+		if err := validateMetadata(metadata); err != nil {
+			return vNext, err
+		}
+	}
+	vNext.metadata = metadata
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// LessThan tests if one version is less than another one.
+func (v *Version) LessThan(o *Version) bool {
+	return v.Compare(o) < 0
+}
+
+// LessThanEqual tests if one version is less or equal than another one.
+func (v *Version) LessThanEqual(o *Version) bool {
+	return v.Compare(o) <= 0
+}
+
+// GreaterThan tests if one version is greater than another one.
+func (v *Version) GreaterThan(o *Version) bool {
+	return v.Compare(o) > 0
+}
+
+// GreaterThanEqual tests if one version is greater or equal than another one.
+func (v *Version) GreaterThanEqual(o *Version) bool {
+	return v.Compare(o) >= 0
+}
+
+// Equal tests if two versions are equal to each other.
+// Note, versions can be equal with different metadata since metadata
+// is not considered part of the comparable version.
+func (v *Version) Equal(o *Version) bool {
+	if v == o {
+		return true
+	}
+	if v == nil || o == nil {
+		return false
+	}
+	return v.Compare(o) == 0
+}
+
+// Compare compares this version to another one. It returns -1, 0, or 1 if
+// the version smaller, equal, or larger than the other version.
+//
+// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
+// lower than the version without a prerelease. Compare always takes into account
+// prereleases. If you want to work with ranges using typical range syntaxes that
+// skip prereleases if the range is not looking for them use constraints.
+func (v *Version) Compare(o *Version) int {
+	// Compare the major, minor, and patch version for differences. If a
+	// difference is found return the comparison.
+	if d := compareSegment(v.Major(), o.Major()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+		return d
+	}
+
+	// At this point the major, minor, and patch versions are the same.
+	ps := v.pre
+	po := o.Prerelease()
+
+	if ps == "" && po == "" {
+		return 0
+	}
+	if ps == "" {
+		return 1
+	}
+	if po == "" {
+		return -1
+	}
+
+	return comparePrerelease(ps, po)
+}
+
+// UnmarshalJSON implements JSON.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// MarshalJSON implements JSON.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (v *Version) UnmarshalText(text []byte) error {
+	temp, err := NewVersion(string(text))
+	if err != nil {
+		return err
+	}
+
+	*v = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (v Version) MarshalText() ([]byte, error) {
+	return []byte(v.String()), nil
+}
+
+// Scan implements the SQL.Scanner interface.
+func (v *Version) Scan(value interface{}) error {
+	var s string
+	s, _ = value.(string)
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// Value implements the Driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+	return v.String(), nil
+}
+
+func compareSegment(v, o uint64) int {
+	if v < o {
+		return -1
+	}
+	if v > o {
+		return 1
+	}
+
+	return 0
+}
+
+func comparePrerelease(v, o string) int {
+	// split the prerelease versions by their part. The separator, per the spec,
+	// is a .
+	sparts := strings.Split(v, ".")
+	oparts := strings.Split(o, ".")
+
+	// Find the longer length of the parts to know how many loop iterations to
+	// go through.
+	slen := len(sparts)
+	olen := len(oparts)
+
+	l := slen
+	if olen > slen {
+		l = olen
+	}
+
+	// Iterate over each part of the prereleases to compare the differences.
+	for i := 0; i < l; i++ {
+		// Since the length of the parts can be different we need to create
+		// a placeholder. This is to avoid out of bounds issues.
+		stemp := ""
+		if i < slen {
+			stemp = sparts[i]
+		}
+
+		otemp := ""
+		if i < olen {
+			otemp = oparts[i]
+		}
+
+		d := comparePrePart(stemp, otemp)
+		if d != 0 {
+			return d
+		}
+	}
+
+	// Reaching here means two versions are of equal value but have different
+	// metadata (the part following a +). They are not identical in string form
+	// but the version comparison finds them to be equal.
+	return 0
+}
+
+func comparePrePart(s, o string) int {
+	// Fastpath if they are equal
+	if s == o {
+		return 0
+	}
+
+	// When s or o are empty we can use the other in an attempt to determine
+	// the response.
+	if s == "" {
+		if o != "" {
+			return -1
+		}
+		return 1
+	}
+
+	if o == "" {
+		if s != "" {
+			return 1
+		}
+		return -1
+	}
+
+	// When comparing strings "99" is greater than "103". To handle
+	// cases like this we need to detect numbers and compare them. According
+	// to the semver spec, numbers are always positive. If there is a - at the
+	// start like -99 this is to be evaluated as an alphanum. Numbers always
+	// have precedence over alphanum. Parsing as Uints because negative numbers
+	// are ignored.
+
+	oi, n1 := strconv.ParseUint(o, 10, 64)
+	si, n2 := strconv.ParseUint(s, 10, 64)
+
+	// When both are strings, compare the strings
+	if n1 != nil && n2 != nil {
+		if s > o {
+			return 1
+		}
+		return -1
+	} else if n1 != nil {
+		// o is a string and s is a number
+		return -1
+	} else if n2 != nil {
+		// s is a string and o is a number
+		return 1
+	}
+	// Both are numbers
+	if si > oi {
+		return 1
+	}
+	return -1
+}
+
+// Like strings.ContainsAny but does an only instead of any.
+func containsOnly(s string, comp string) bool {
+	return strings.IndexFunc(s, func(r rune) bool {
+		return !strings.ContainsRune(comp, r)
+	}) == -1
+}
+
+// From the spec, "Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty.
+// Numeric identifiers MUST NOT include leading zeroes.". These segments can
+// be dot separated.
+func validatePrerelease(p string) error {
+	eparts := strings.Split(p, ".")
+	for _, p := range eparts {
+		if p == "" {
+			return ErrInvalidPrerelease
+		} else if containsOnly(p, num) {
+			if len(p) > 1 && p[0] == '0' {
+				return ErrSegmentStartsZero
+			}
+		} else if !containsOnly(p, allowed) {
+			return ErrInvalidPrerelease
+		}
+	}
+
+	return nil
+}
+
+// From the spec, "Build metadata MAY be denoted by
+// appending a plus sign and a series of dot separated identifiers immediately
+// following the patch or pre-release version. Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty."
+func validateMetadata(m string) error {
+	eparts := strings.Split(m, ".")
+	for _, p := range eparts {
+		if p == "" {
+			return ErrInvalidMetadata
+		} else if !containsOnly(p, allowed) {
+			return ErrInvalidMetadata
+		}
+	}
+	return nil
+}
+
+// validateVersion checks for common validation issues but may not catch all errors
+func validateVersion(m []string) error {
+	var err error
+	var v string
+	if m[1] != "" {
+		if len(m[1]) > 1 && m[1][0] == '0' {
+			return ErrSegmentStartsZero
+		}
+		_, err = strconv.ParseUint(m[1], 10, 64)
+		if err != nil {
+			return fmt.Errorf("error parsing version segment: %w", err)
+		}
+	}
+
+	if m[2] != "" {
+		v = strings.TrimPrefix(m[2], ".")
+		if len(v) > 1 && v[0] == '0' {
+			return ErrSegmentStartsZero
+		}
+		_, err = strconv.ParseUint(v, 10, 64)
+		if err != nil {
+			return fmt.Errorf("error parsing version segment: %w", err)
+		}
+	}
+
+	if m[3] != "" {
+		v = strings.TrimPrefix(m[3], ".")
+		if len(v) > 1 && v[0] == '0' {
+			return ErrSegmentStartsZero
+		}
+		_, err = strconv.ParseUint(v, 10, 64)
+		if err != nil {
+			return fmt.Errorf("error parsing version segment: %w", err)
+		}
+	}
+
+	if m[5] != "" {
+		if err = validatePrerelease(m[5]); err != nil {
+			return err
+		}
+	}
+
+	if m[8] != "" {
+		if err = validateMetadata(m[8]); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md b/openshift/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md
index 73fe513468..773af218e9 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md
@@ -1,3 +1,72 @@
+## v2.9.0 (2025-11-17)
+
+* [GH-3508](https://github.com/gophercloud/gophercloud/pull/3508) [v2] Trigger "hold" workflow on merge groups
+* [GH-3511](https://github.com/gophercloud/gophercloud/pull/3511) [v2] Closes #2321 - Fix TestRolesCRUD by including DomainID to TestRolesCRUD
+* [GH-3513](https://github.com/gophercloud/gophercloud/pull/3513) [v2] build(deps): bump actions/labeler from 5 to 6
+* [GH-3516](https://github.com/gophercloud/gophercloud/pull/3516) [v2] refactor: Trivial fixes
+* [GH-3524](https://github.com/gophercloud/gophercloud/pull/3524) [v2] [glance]: Add 'uploading' status
+* [GH-3525](https://github.com/gophercloud/gophercloud/pull/3525) [v2] compute: Add host aggregate uuid field
+* [GH-3526](https://github.com/gophercloud/gophercloud/pull/3526) [v2] Enable deletion for network and loadbalancer quotas
+* [GH-3541](https://github.com/gophercloud/gophercloud/pull/3541) [v2] docs: Document tested releases for acceptance tests
+* [GH-3544](https://github.com/gophercloud/gophercloud/pull/3544) [v2] Identity V3: Add Options field to roles.
+* [GH-3547](https://github.com/gophercloud/gophercloud/pull/3547) [v2] Add config_drive to server struct
+* [GH-3548](https://github.com/gophercloud/gophercloud/pull/3548) [v2] Identity: Add description field to roles
+* [GH-3549](https://github.com/gophercloud/gophercloud/pull/3549) [v2] compute: add cpu info topology cells entry
+* [GH-3550](https://github.com/gophercloud/gophercloud/pull/3550) [v2] Migrate epoxy jobs to Ubuntu 24.04 (Noble), drop caracal jobs
+* [GH-3551](https://github.com/gophercloud/gophercloud/pull/3551) [v2] build(deps): bump github/codeql-action from 3 to 4
+* [GH-3557](https://github.com/gophercloud/gophercloud/pull/3557) [v2] Fix EC2 authentication to work with new Keystone auth requirement
+* [GH-3558](https://github.com/gophercloud/gophercloud/pull/3558) [v2] identity/services: add omitempty to the `type` field
+* [GH-3559](https://github.com/gophercloud/gophercloud/pull/3559) [v2] fix: handle Nova create image response for microversion 2.45 and above
+
+## v2.8.0 (2025-08-18)
+
+* [GH-3348](https://github.com/gophercloud/gophercloud/pull/3348) [v2] [networking] add ExtractRoutersInto func helper to routers
+* [GH-3354](https://github.com/gophercloud/gophercloud/pull/3354) [v2] Fix a small typo
+* [GH-3358](https://github.com/gophercloud/gophercloud/pull/3358) [v2] tests: fix devstack master branch tests
+* [GH-3361](https://github.com/gophercloud/gophercloud/pull/3361) [v2] octavia: fix http_version type to float
+* [GH-3362](https://github.com/gophercloud/gophercloud/pull/3362) [v2] tests: fix containerinfra template creation
+* [GH-3367](https://github.com/gophercloud/gophercloud/pull/3367) [v2] Use Makefile for CI jobs
+* [GH-3375](https://github.com/gophercloud/gophercloud/pull/3375) [v2] core: add missing Builder interfaces
+* [GH-3378](https://github.com/gophercloud/gophercloud/pull/3378) [v2] tests: fix failing rabbitmq service
+* [GH-3379](https://github.com/gophercloud/gophercloud/pull/3379) [v2] CI: Remove Bobcat
+* [GH-3384](https://github.com/gophercloud/gophercloud/pull/3384) [v2] Move master CI jobs to Ubuntu 24.04
+* [GH-3386](https://github.com/gophercloud/gophercloud/pull/3386) [v2] tests: Fix TestBGPAgentCRUD
+* [GH-3387](https://github.com/gophercloud/gophercloud/pull/3387) [v2] Update the doc of openstack.AuthOptionsFromEnv function
+* [GH-3389](https://github.com/gophercloud/gophercloud/pull/3389) [v2] networking: add constants for statuses
+* [GH-3391](https://github.com/gophercloud/gophercloud/pull/3391) [v2] CI: Add Epoxy
+* [GH-3393](https://github.com/gophercloud/gophercloud/pull/3393) [v2] dns: implement shared zones list
+* [GH-3394](https://github.com/gophercloud/gophercloud/pull/3394) [v2] acceptance: Prevent 409 when bulk-creating secgroup rules
+* [GH-3396](https://github.com/gophercloud/gophercloud/pull/3396) [v2] identity: add support for string boolean in users' enabled member
+* [GH-3397](https://github.com/gophercloud/gophercloud/pull/3397) [v2] Adjust List func to accept a Builder in tenants, routers and security groups packages
+* [GH-3399](https://github.com/gophercloud/gophercloud/pull/3399) [v2] blockstorage: add manage-existing and unmanage api call
+* [GH-3401](https://github.com/gophercloud/gophercloud/pull/3401) [v2] Added address groups to Networking extensions, with tests.
+* [GH-3407](https://github.com/gophercloud/gophercloud/pull/3407) [v2] neutron: add segment_id support to subnets
+* [GH-3413](https://github.com/gophercloud/gophercloud/pull/3413) [v2] build(deps): bump joelanford/go-apidiff from 0.8.2 to 0.8.3
+* [GH-3416](https://github.com/gophercloud/gophercloud/pull/3416) [v2] tests: bump devstack-action
+* [GH-3422](https://github.com/gophercloud/gophercloud/pull/3422) [v2] Fix documentation for gateway_ip in subnet update
+* [GH-3431](https://github.com/gophercloud/gophercloud/pull/3431) [v2] Use container-infra for OpenStack-API-Version
+* [GH-3433](https://github.com/gophercloud/gophercloud/pull/3433) [v2] make: Use fixed version of gotestsum
+* [GH-3434](https://github.com/gophercloud/gophercloud/pull/3434) [v2] Randomize test order for unit tests
+* [GH-3435](https://github.com/gophercloud/gophercloud/pull/3435) [v2] Add versioned endpoint discovery
+* [GH-3438](https://github.com/gophercloud/gophercloud/pull/3438) [v2] dns: add support for /v2/quotas
+* [GH-3439](https://github.com/gophercloud/gophercloud/pull/3439) [v2] neutron: add segments extension package
+* [GH-3446](https://github.com/gophercloud/gophercloud/pull/3446) nova: add support for hostname updates
+* [GH-3452](https://github.com/gophercloud/gophercloud/pull/3452) [v2] neutron: allow omission of subnet_id for IP address
+* [GH-3454](https://github.com/gophercloud/gophercloud/pull/3454) [v2] blockstorage: add isPublic query option for volume types
+* [GH-3458](https://github.com/gophercloud/gophercloud/pull/3458) [v2] Fix pagination for messaging client
+* [GH-3465](https://github.com/gophercloud/gophercloud/pull/3465) [v2] tests: Fix TestVLANTransparentCRUD test
+* [GH-3466](https://github.com/gophercloud/gophercloud/pull/3466) [v2] tests: fix tests for recent PR backports
+* [GH-3469](https://github.com/gophercloud/gophercloud/pull/3469) [v2] tests: shorten GH-A job names
+* [GH-3473](https://github.com/gophercloud/gophercloud/pull/3473) [v2] core: clone service type aliases instead of referencing global slice
+* [GH-3475](https://github.com/gophercloud/gophercloud/pull/3475) [v2] Implement update & delete traits on resource provider
+* [GH-3476](https://github.com/gophercloud/gophercloud/pull/3476) [v2] tests: fix volumetypes unit tests
+* [GH-3477](https://github.com/gophercloud/gophercloud/pull/3477) [v2] script: Improve getenvvar helper
+* [GH-3481](https://github.com/gophercloud/gophercloud/pull/3481) [v2] Implement hypervisors.GetExt: Get with Query parameter
+* [GH-3487](https://github.com/gophercloud/gophercloud/pull/3487) [v2] Add networking taas tapmirror suite
+* [GH-3489](https://github.com/gophercloud/gophercloud/pull/3489) [v2] Fix incorrect ICMP field description in PortRangeMax comment
+* [GH-3494](https://github.com/gophercloud/gophercloud/pull/3494) [v2] Networking v2: Support two time formats for subnet, router, SG rule (#3492)
+* [GH-3495](https://github.com/gophercloud/gophercloud/pull/3495) [v2] build(deps): bump actions/checkout from 4 to 5
+
 ## v2.7.0 (2025-04-03)
 
 * [GH-3306](https://github.com/gophercloud/gophercloud/pull/3306) [v2] identity: Add Get endpoint by ID
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/Makefile b/openshift/vendor/github.com/gophercloud/gophercloud/v2/Makefile
index 2a0618a6b6..c63adb8d03 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/Makefile
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/Makefile
@@ -1,7 +1,9 @@
 undefine GOFLAGS
 
 GOLANGCI_LINT_VERSION?=v1.62.2
-GO_TEST?=go run gotest.tools/gotestsum@latest --format testname --
+GOTESTSUM_VERSION?=v1.12.2
+GO_TEST?=go run gotest.tools/gotestsum@$(GOTESTSUM_VERSION) --format testname --
+TIMEOUT := "60m"
 
 ifeq ($(shell command -v podman 2> /dev/null),)
 	RUNNER=docker
@@ -9,15 +11,18 @@ else
 	RUNNER=podman
 endif
 
-# if the golangci-lint steps fails with the following error message:
+# if the golangci-lint step fails with one of the following error messages:
 #
 #   directory prefix . does not contain main module or its selected dependencies
 #
+#   failed to initialize build cache at /root/.cache/golangci-lint: mkdir /root/.cache/golangci-lint: permission denied
+#
 # you probably have to fix the SELinux security context for root directory plus your cache
 #
 #   chcon -Rt svirt_sandbox_file_t .
 #   chcon -Rt svirt_sandbox_file_t ~/.cache/golangci-lint
 lint:
+	mkdir -p ~/.cache/golangci-lint/$(GOLANGCI_LINT_VERSION)
 	$(RUNNER) run -t --rm \
 		-v $(shell pwd):/app \
 		-v ~/.cache/golangci-lint/$(GOLANGCI_LINT_VERSION):/root/.cache \
@@ -31,84 +36,88 @@ format:
 .PHONY: format
 
 unit:
-	$(GO_TEST) ./...
+	$(GO_TEST) -shuffle on ./...
 .PHONY: unit
 
 coverage:
-	$(GO_TEST) -covermode count -coverprofile cover.out -coverpkg=./... ./...
+	$(GO_TEST) -shuffle on -covermode count -coverprofile cover.out -coverpkg=./... ./...
 .PHONY: coverage
 
-acceptance: acceptance-baremetal acceptance-blockstorage acceptance-compute acceptance-container acceptance-containerinfra acceptance-db acceptance-dns acceptance-identity acceptance-imageservice acceptance-keymanager acceptance-loadbalancer acceptance-messaging acceptance-networking acceptance-objectstorage acceptance-orchestration acceptance-placement acceptance-sharedfilesystems acceptance-workflow
+acceptance: acceptance-basic acceptance-baremetal acceptance-blockstorage acceptance-compute acceptance-container acceptance-containerinfra acceptance-db acceptance-dns acceptance-identity acceptance-image acceptance-keymanager acceptance-loadbalancer acceptance-messaging acceptance-networking acceptance-objectstorage acceptance-orchestration acceptance-placement acceptance-sharedfilesystems acceptance-workflow
 .PHONY: acceptance
 
+acceptance-basic:
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack
+.PHONY: acceptance-basic
+
 acceptance-baremetal:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/baremetal/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/baremetal/...
 .PHONY: acceptance-baremetal
 
 acceptance-blockstorage:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/blockstorage/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/blockstorage/...
 .PHONY: acceptance-blockstorage
 
 acceptance-compute:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/compute/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/compute/...
 .PHONY: acceptance-compute
 
 acceptance-container:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/container/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/container/...
 .PHONY: acceptance-container
 
 acceptance-containerinfra:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/containerinfra/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/containerinfra/...
 .PHONY: acceptance-containerinfra
 
 acceptance-db:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/db/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/db/...
 .PHONY: acceptance-db
 
 acceptance-dns:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/dns/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/dns/...
 .PHONY: acceptance-dns
 
 acceptance-identity:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/identity/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/identity/...
 .PHONY: acceptance-identity
 
 acceptance-image:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/imageservice/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/image/...
 .PHONY: acceptance-image
 
 acceptance-keymanager:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/keymanager/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/keymanager/...
 .PHONY: acceptance-keymanager
 
 acceptance-loadbalancer:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/loadbalancer/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/loadbalancer/...
 .PHONY: acceptance-loadbalancer
 
 acceptance-messaging:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/messaging/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/messaging/...
 .PHONY: acceptance-messaging
 
 acceptance-networking:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/networking/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/networking/...
 .PHONY: acceptance-networking
 
 acceptance-objectstorage:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/objectstorage/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/objectstorage/...
 .PHONY: acceptance-objectstorage
 
 acceptance-orchestration:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/orchestration/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/orchestration/...
 .PHONY: acceptance-orchestration
 
 acceptance-placement:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/placement/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/placement/...
 .PHONY: acceptance-placement
 
 acceptance-sharedfilesystems:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/sharedfilesystems/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/sharedfilesystems/...
 .PHONY: acceptance-sharefilesystems
 
 acceptance-workflow:
-	$(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/workflow/...
+	$(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/workflow/...
 .PHONY: acceptance-workflow
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go
index 8818e769b8..34d76a1b8d 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go
@@ -79,6 +79,11 @@ type EndpointOpts struct {
 	// Required only for services that span multiple regions.
 	Region string
 
+	// Version [optional] is the major version of the service required. It is not
+	// a microversion. Use this to ensure the correct endpoint is selected when
+	// multiple API versions are available.
+	Version int
+
 	// Availability [optional] is the visibility of the endpoint to be returned.
 	// Valid types include the constants AvailabilityPublic, AvailabilityInternal,
 	// or AvailabilityAdmin from this package.
@@ -111,7 +116,7 @@ func (eo *EndpointOpts) ApplyDefaults(t string) {
 	if len(eo.Aliases) == 0 {
 		if aliases, ok := ServiceTypeAliases[eo.Type]; ok {
 			// happy path: user requested a service type by its official name
-			eo.Aliases = aliases
+			eo.Aliases = slices.Clone(aliases)
 		} else {
 			// unhappy path: user requested a service type by its alias or an
 			// invalid/unsupported service type
@@ -121,7 +126,7 @@ func (eo *EndpointOpts) ApplyDefaults(t string) {
 					// we intentionally override the service type, even if it
 					// was explicitly requested by the user
 					eo.Type = t
-					eo.Aliases = aliases
+					eo.Aliases = slices.Clone(aliases)
 				}
 			}
 		}
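
The switch to slices.Clone above matters because ApplyDefaults hands the alias slice to the caller, who may then mutate it. A small sketch of the aliasing hazard, using an illustrative stand-in for the package-level table:

	package main

	import (
		"fmt"
		"slices"
	)

	// serviceTypeAliases is an illustrative stand-in for the package-level
	// table that ApplyDefaults reads from.
	var serviceTypeAliases = map[string][]string{
		"block-storage": {"volumev3", "volumev2", "volume"},
	}

	func main() {
		// Without a clone, the returned slice shares the table's backing
		// array, so mutating it corrupts the table for every later caller.
		shared := serviceTypeAliases["block-storage"]
		shared[0] = "oops"
		fmt.Println(serviceTypeAliases["block-storage"][0]) // "oops"

		// With slices.Clone, mutations stay local.
		serviceTypeAliases["block-storage"][0] = "volumev3" // restore
		cloned := slices.Clone(serviceTypeAliases["block-storage"])
		cloned[0] = "safe"
		fmt.Println(serviceTypeAliases["block-storage"][0]) // still "volumev3"
	}
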
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/auth_env.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/auth_env.go
index 893787b787..9ecc5b4efe 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/auth_env.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/auth_env.go
@@ -24,8 +24,8 @@ OS_PROJECT_NAME and the latter are expected against a v3 auth api.
 If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will still be referred
 as "tenant" in Gophercloud.
 
-If OS_PROJECT_NAME is set, it requires OS_PROJECT_ID to be set as well to
-handle projects not on the default domain.
+If OS_PROJECT_NAME is set, it requires OS_DOMAIN_ID or OS_DOMAIN_NAME to be
+set as well to handle projects not on the default domain.
 
 To use this function, first set the OS_* environment variables (for example,
 by sourcing an `openrc` file), then:
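
A minimal sketch of that flow, assuming the usual OS_* variables are exported (now including OS_DOMAIN_ID or OS_DOMAIN_NAME whenever OS_PROJECT_NAME is used):

	package main

	import (
		"context"
		"log"

		"github.com/gophercloud/gophercloud/v2/openstack"
	)

	func main() {
		// Collects OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_PROJECT_NAME and
		// friends from the environment.
		ao, err := openstack.AuthOptionsFromEnv()
		if err != nil {
			log.Fatal(err)
		}

		provider, err := openstack.AuthenticatedClient(context.TODO(), ao)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("authenticated against %s", provider.IdentityEndpoint)
	}
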
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/doc.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/doc.go
index 2ab4af93ee..e018b57a8d 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/doc.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/doc.go
@@ -157,5 +157,12 @@ Example of Attaching a Volume to an Instance
 	if err != nil {
 		panic(err)
 	}
+
+Example of Unmanaging a Volume
+
+	err := volumes.Unmanage(context.TODO(), client, volume.ID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
 */
 package volumes
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/requests.go
index 77210943b5..1026d1ecaa 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/requests.go
@@ -623,6 +623,12 @@ func SetImageMetadata(ctx context.Context, client *gophercloud.ServiceClient, id
 	return
 }
 
+// BootableOptsBuilder allows extensions to add additional parameters to the
+// SetBootable request.
+type BootableOptsBuilder interface {
+	ToBootableMap() (map[string]any, error)
+}
+
 // BootableOpts contains options for setting bootable status to a volume.
 type BootableOpts struct {
 	// Enables or disables the bootable attribute. You can boot an instance from a bootable volume.
@@ -636,7 +642,7 @@ func (opts BootableOpts) ToBootableMap() (map[string]any, error) {
 }
 
 // SetBootable will set bootable status on a volume based on the values in BootableOpts
-func SetBootable(ctx context.Context, client *gophercloud.ServiceClient, id string, opts BootableOpts) (r SetBootableResult) {
+func SetBootable(ctx context.Context, client *gophercloud.ServiceClient, id string, opts BootableOptsBuilder) (r SetBootableResult) {
 	b, err := opts.ToBootableMap()
 	if err != nil {
 		r.Err = err
@@ -697,6 +703,12 @@ func ChangeType(ctx context.Context, client *gophercloud.ServiceClient, id strin
 	return
 }
 
+// ReImageOptsBuilder allows extensions to add additional parameters to the
+// ReImage request.
+type ReImageOptsBuilder interface {
+	ToReImageMap() (map[string]any, error)
+}
+
 // ReImageOpts contains options for Re-image a volume.
 type ReImageOpts struct {
 	// New image id
@@ -711,7 +723,7 @@ func (opts ReImageOpts) ToReImageMap() (map[string]any, error) {
 }
 
 // ReImage will re-image a volume based on the values in ReImageOpts
-func ReImage(ctx context.Context, client *gophercloud.ServiceClient, id string, opts ReImageOpts) (r ReImageResult) {
+func ReImage(ctx context.Context, client *gophercloud.ServiceClient, id string, opts ReImageOptsBuilder) (r ReImageResult) {
 	b, err := opts.ToReImageMap()
 	if err != nil {
 		r.Err = err
@@ -763,3 +775,14 @@ func ResetStatus(ctx context.Context, client *gophercloud.ServiceClient, id stri
 	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
 	return
 }
+
+// Unmanage removes a volume from Block Storage management without
+// removing the back-end storage object that is associated with it.
+func Unmanage(ctx context.Context, client *gophercloud.ServiceClient, id string) (r UnmanageResult) {
+	body := map[string]any{"os-unmanage": make(map[string]any)}
+	resp, err := client.Post(ctx, actionURL(client, id), body, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
+	return
+}
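
The new *OptsBuilder parameters mean callers are no longer tied to the stock opts structs. A sketch of a caller-defined builder; rawBootableOpts and markBootable are hypothetical, and the sketch assumes the same "os-set_bootable" envelope that the stock BootableOpts produces:

	package example

	import (
		"context"

		"github.com/gophercloud/gophercloud/v2"
		"github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes"
	)

	// rawBootableOpts is a hypothetical caller-defined builder satisfying the
	// new BootableOptsBuilder interface.
	type rawBootableOpts map[string]any

	// ToBootableMap wraps the raw values in the "os-set_bootable" envelope,
	// mirroring what the stock BootableOpts produces.
	func (o rawBootableOpts) ToBootableMap() (map[string]any, error) {
		return map[string]any{"os-set_bootable": map[string]any(o)}, nil
	}

	func markBootable(ctx context.Context, client *gophercloud.ServiceClient, volumeID string) error {
		return volumes.SetBootable(ctx, client, volumeID, rawBootableOpts{"bootable": true}).ExtractErr()
	}
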
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/results.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/results.go
index 3f184b398e..e99ef5e197 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/results.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/results.go
@@ -399,3 +399,8 @@ type ReImageResult struct {
 type ResetStatusResult struct {
 	gophercloud.ErrResult
 }
+
+// UnmanageResult contains the response error from an Unmanage request.
+type UnmanageResult struct {
+	gophercloud.ErrResult
+}
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go
index 122a3ee699..73ca5c56d5 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go
@@ -2,6 +2,7 @@ package openstack
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"reflect"
 	"strings"
@@ -162,7 +163,7 @@ func v2auth(ctx context.Context, client *gophercloud.ProviderClient, endpoint st
 		}
 	}
 	client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) {
-		return V2EndpointURL(catalog, opts)
+		return V2Endpoint(context.TODO(), client, catalog, opts)
 	}
 
 	return nil
@@ -283,7 +284,7 @@ func v3auth(ctx context.Context, client *gophercloud.ProviderClient, endpoint st
 		}
 	}
 	client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) {
-		return V3EndpointURL(catalog, opts)
+		return V3Endpoint(context.TODO(), client, catalog, opts)
 	}
 
 	return nil
@@ -345,13 +346,20 @@ func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOp
 }
 
 // TODO(stephenfin): Allow passing aliases to all New${SERVICE}V${VERSION} methods in v3
-func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) {
+func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string, version int) (*gophercloud.ServiceClient, error) {
 	sc := new(gophercloud.ServiceClient)
+
 	eo.ApplyDefaults(clientType)
+	if eo.Version != 0 && eo.Version != version {
+		return sc, errors.New("Conflict between requested service major version and manually set version")
+	}
+	eo.Version = version
+
 	url, err := client.EndpointLocator(eo)
 	if err != nil {
 		return sc, err
 	}
+
 	sc.ProviderClient = client
 	sc.Endpoint = url
 	sc.Type = clientType
@@ -361,7 +369,7 @@ func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointO
 // NewBareMetalV1 creates a ServiceClient that may be used with the v1
 // bare metal package.
 func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	sc, err := initClientOpts(client, eo, "baremetal")
+	sc, err := initClientOpts(client, eo, "baremetal", 1)
 	if !strings.HasSuffix(strings.TrimSuffix(sc.Endpoint, "/"), "v1") {
 		sc.ResourceBase = sc.Endpoint + "v1/"
 	}
@@ -371,25 +379,25 @@ func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointO
 // NewBareMetalIntrospectionV1 creates a ServiceClient that may be used with the v1
 // bare metal introspection package.
 func NewBareMetalIntrospectionV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "baremetal-introspection")
+	return initClientOpts(client, eo, "baremetal-introspection", 1)
 }
 
 // NewObjectStorageV1 creates a ServiceClient that may be used with the v1
 // object storage package.
 func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "object-store")
+	return initClientOpts(client, eo, "object-store", 1)
 }
 
 // NewComputeV2 creates a ServiceClient that may be used with the v2 compute
 // package.
 func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "compute")
+	return initClientOpts(client, eo, "compute", 2)
 }
 
 // NewNetworkV2 creates a ServiceClient that may be used with the v2 network
 // package.
 func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	sc, err := initClientOpts(client, eo, "network")
+	sc, err := initClientOpts(client, eo, "network", 2)
 	sc.ResourceBase = sc.Endpoint + "v2.0/"
 	return sc, err
 }
@@ -398,40 +406,40 @@ func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpt
 // NewBlockStorageV1 creates a ServiceClient that may be used to access the v1
 // block storage service.
 func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "volume")
+	return initClientOpts(client, eo, "volume", 1)
 }
 
 // NewBlockStorageV2 creates a ServiceClient that may be used to access the v2
 // block storage service.
 func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "block-storage")
+	return initClientOpts(client, eo, "block-storage", 2)
 }
 
 // NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service.
 func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "block-storage")
+	return initClientOpts(client, eo, "block-storage", 3)
 }
 
 // NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service.
 func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "shared-file-system")
+	return initClientOpts(client, eo, "shared-file-system", 2)
 }
 
 // NewOrchestrationV1 creates a ServiceClient that may be used to access the v1
 // orchestration service.
 func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "orchestration")
+	return initClientOpts(client, eo, "orchestration", 1)
 }
 
 // NewDBV1 creates a ServiceClient that may be used to access the v1 DB service.
 func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "database")
+	return initClientOpts(client, eo, "database", 1)
 }
 
 // NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS
 // service.
 func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	sc, err := initClientOpts(client, eo, "dns")
+	sc, err := initClientOpts(client, eo, "dns", 2)
 	sc.ResourceBase = sc.Endpoint + "v2/"
 	return sc, err
 }
@@ -439,7 +447,7 @@ func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (
 // NewImageV2 creates a ServiceClient that may be used to access the v2 image
 // service.
 func NewImageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	sc, err := initClientOpts(client, eo, "image")
+	sc, err := initClientOpts(client, eo, "image", 2)
 	sc.ResourceBase = sc.Endpoint + "v2/"
 	return sc, err
 }
@@ -447,7 +455,7 @@ func NewImageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts)
 // NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2
 // load balancer service.
 func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	sc, err := initClientOpts(client, eo, "load-balancer")
+	sc, err := initClientOpts(client, eo, "load-balancer", 2)
 
 	// Fixes edge case having an OpenStack lb endpoint with trailing version number.
 	endpoint := strings.Replace(sc.Endpoint, "v2.0/", "", -1)
@@ -459,20 +467,20 @@ func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.Endpoi
 // NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging
 // service.
 func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	sc, err := initClientOpts(client, eo, "message")
+	sc, err := initClientOpts(client, eo, "message", 2)
 	sc.MoreHeaders = map[string]string{"Client-ID": clientID}
 	return sc, err
 }
 
 // NewContainerV1 creates a ServiceClient that may be used with v1 container package
 func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "application-container")
+	return initClientOpts(client, eo, "application-container", 1)
 }
 
 // NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key
 // manager service.
 func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	sc, err := initClientOpts(client, eo, "key-manager")
+	sc, err := initClientOpts(client, eo, "key-manager", 1)
 	sc.ResourceBase = sc.Endpoint + "v1/"
 	return sc, err
 }
@@ -480,15 +488,15 @@ func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.Endpoint
 // NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management
 // package.
 func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "container-infrastructure-management")
+	return initClientOpts(client, eo, "container-infrastructure-management", 1)
 }
 
 // NewWorkflowV2 creates a ServiceClient that may be used with the v2 workflow management package.
 func NewWorkflowV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "workflow")
+	return initClientOpts(client, eo, "workflow", 2)
 }
 
 // NewPlacementV1 creates a ServiceClient that may be used with the placement package.
 func NewPlacementV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "placement")
+	return initClientOpts(client, eo, "placement", 1)
 }
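
With every constructor now passing its major version into initClientOpts, discovery can reject catalog endpoints serving a different major version, and a manually set EndpointOpts.Version that disagrees fails fast. A sketch of how that surfaces to callers, assuming a reachable cloud configured via OS_* variables:

	package main

	import (
		"context"
		"log"

		"github.com/gophercloud/gophercloud/v2"
		"github.com/gophercloud/gophercloud/v2/openstack"
	)

	func main() {
		ao, err := openstack.AuthOptionsFromEnv()
		if err != nil {
			log.Fatal(err)
		}
		provider, err := openstack.AuthenticatedClient(context.TODO(), ao)
		if err != nil {
			log.Fatal(err)
		}

		// NewBlockStorageV3 pins EndpointOpts.Version to 3, so discovery can
		// skip catalog endpoints that only serve another major version.
		if _, err := openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{}); err != nil {
			log.Fatal(err)
		}

		// A manually set Version that disagrees with the constructor now
		// fails fast instead of yielding a mismatched endpoint.
		if _, err := openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{Version: 2}); err != nil {
			log.Printf("expected conflict: %v", err)
		}
	}
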
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go
index 44e8cccaeb..c0ccebfa49 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go
@@ -1,10 +1,12 @@
 package servers
 
 import (
+	"bytes"
 	"context"
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
+	"io"
 	"maps"
 	"net"
 	"regexp"
@@ -651,6 +653,12 @@ type UpdateOpts struct {
 
 	// AccessIPv6 provides a new IPv6 address for the instance.
 	AccessIPv6 string `json:"accessIPv6,omitempty"`
+
+	// Hostname changes the hostname of the server.
+	// Requires microversion 2.90 or later.
+	// Note: This information is published via the metadata service and requires
+	// application such as cloud-init to propagate it through to the instance.
+	Hostname *string `json:"hostname,omitempty"`
 }
 
 // ToServerUpdateMap formats an UpdateOpts structure into a request body.
@@ -1044,10 +1052,35 @@ func CreateImage(ctx context.Context, client *gophercloud.ServiceClient, id stri
 		r.Err = err
 		return
 	}
+
 	resp, err := client.Post(ctx, actionURL(client, id), b, nil, &gophercloud.RequestOpts{
-		OkCodes: []int{202},
+		OkCodes:          []int{202},
+		KeepResponseBody: true,
 	})
+
 	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
+	if r.Err != nil {
+		return
+	}
+	defer resp.Body.Close()
+
+	if v := r.Header.Get("Content-Type"); v != "application/json" {
+		return
+	}
+
+	// The response body is expected to be a small JSON object containing only "image_id".
+	// Read it fully into memory so the response body can be closed immediately.
+	// If the caller doesn't read from the buffer, it can still be safely garbage collected.
+
+	var buf bytes.Buffer
+
+	_, r.Err = io.Copy(&buf, resp.Body)
+	if r.Err != nil {
+		return
+	}
+
+	r.Body = &buf
+
 	return
 }
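
Callers don't need to care which transport carried the image ID; ExtractImageID (in results.go below) reads either the Location header or the buffered body, depending on the negotiated microversion. A sketch of the call site; the helper and snapshot name are illustrative:

	package example

	import (
		"context"

		"github.com/gophercloud/gophercloud/v2"
		"github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers"
	)

	// createSnapshot is a hypothetical helper showing the CreateImage flow.
	func createSnapshot(ctx context.Context, client *gophercloud.ServiceClient, serverID string) (string, error) {
		// At microversion 2.45 and later, Nova returns the image ID in the
		// JSON body that CreateImage now buffers; older microversions still
		// use the Location header. ExtractImageID handles both.
		client.Microversion = "2.45"

		result := servers.CreateImage(ctx, client, serverID, servers.CreateImageOpts{
			Name: "my-snapshot", // illustrative name
		})
		return result.ExtractImageID()
	}
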
 
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go
index 385001c8dd..edc2740f68 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go
@@ -7,9 +7,11 @@ import (
 	"fmt"
 	"net/url"
 	"path"
+	"strconv"
 	"time"
 
 	"github.com/gophercloud/gophercloud/v2"
+	"github.com/gophercloud/gophercloud/v2/openstack/utils"
 	"github.com/gophercloud/gophercloud/v2/pagination"
 )
 
@@ -132,18 +134,49 @@ func (r CreateImageResult) ExtractImageID() (string, error) {
 	if r.Err != nil {
 		return "", r.Err
 	}
-	// Get the image id from the header
+
+	microversion := r.Header.Get("X-OpenStack-Nova-API-Version")
+
+	major, minor, err := utils.ParseMicroversion(microversion)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse X-OpenStack-Nova-API-Version header: %s", err)
+	}
+
+	// In microversions prior to 2.45, the image ID was provided in the Location header.
+	if major < 2 || (major == 2 && minor < 45) {
+		return r.extractImageIDFromLocationHeader()
+	}
+
+	// Starting from 2.45, it is included in the response body.
+	return r.extractImageIDFromResponseBody()
+}
+
+func (r CreateImageResult) extractImageIDFromLocationHeader() (string, error) {
 	u, err := url.ParseRequestURI(r.Header.Get("Location"))
 	if err != nil {
 		return "", err
 	}
+
 	imageID := path.Base(u.Path)
 	if imageID == "." || imageID == "/" {
 		return "", fmt.Errorf("Failed to parse the ID of newly created image: %s", u)
 	}
+
 	return imageID, nil
 }
 
+func (r CreateImageResult) extractImageIDFromResponseBody() (string, error) {
+	var response struct {
+		ImageID string `json:"image_id"`
+	}
+
+	if err := r.ExtractInto(&response); err != nil {
+		return "", err
+	}
+
+	return response.ImageID, nil
+}
+
 // Server represents a server/instance in the OpenStack cloud.
 type Server struct {
 	// ID uniquely identifies this server amongst all other servers,
@@ -283,6 +316,9 @@ type Server struct {
 	// Locked indicates the lock status of the server
 	// This requires microversion 2.9 or later
 	Locked *bool `json:"locked"`
+
+	// ConfigDrive enables metadata injection through a configuration drive.
+	ConfigDrive bool `json:"-"`
 }
 
 type AttachedVolume struct {
@@ -343,6 +379,7 @@ func (r *Server) UnmarshalJSON(b []byte) error {
 		Image        any                             `json:"image"`
 		LaunchedAt   gophercloud.JSONRFC3339MilliNoZ `json:"OS-SRV-USG:launched_at"`
 		TerminatedAt gophercloud.JSONRFC3339MilliNoZ `json:"OS-SRV-USG:terminated_at"`
+		ConfigDrive  any                             `json:"config_drive"`
 	}
 	err := json.Unmarshal(b, &s)
 	if err != nil {
@@ -364,6 +401,24 @@ func (r *Server) UnmarshalJSON(b []byte) error {
 	r.LaunchedAt = time.Time(s.LaunchedAt)
 	r.TerminatedAt = time.Time(s.TerminatedAt)
 
+	switch t := s.ConfigDrive.(type) {
+	case nil:
+		r.ConfigDrive = false
+	case bool:
+		r.ConfigDrive = t
+	case string:
+		if t == "" {
+			r.ConfigDrive = false
+		} else {
+			r.ConfigDrive, err = strconv.ParseBool(t)
+			if err != nil {
+				return fmt.Errorf("failed to parse ConfigDrive %q: %v", t, err)
+			}
+		}
+	default:
+		return fmt.Errorf("unknown type for ConfigDrive: %T (value: %v)", t, t)
+	}
+
 	return err
 }
 
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint.go
new file mode 100644
index 0000000000..6178434423
--- /dev/null
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint.go
@@ -0,0 +1,190 @@
+package openstack
+
+import (
+	"context"
+	"regexp"
+	"slices"
+	"strconv"
+
+	"github.com/gophercloud/gophercloud/v2"
+	tokens2 "github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tokens"
+	tokens3 "github.com/gophercloud/gophercloud/v2/openstack/identity/v3/tokens"
+	"github.com/gophercloud/gophercloud/v2/openstack/utils"
+)
+
+var versionedServiceTypeAliasRegexp = regexp.MustCompile(`^.*v(\d)$`)
+
+func extractServiceTypeVersion(serviceType string) int {
+	matches := versionedServiceTypeAliasRegexp.FindAllStringSubmatch(serviceType, 1)
+	if matches != nil {
+		// the regexp captures a single digit, so this conversion should not fail
+		ret, err := strconv.Atoi(matches[0][1])
+		if err != nil {
+			return 0
+		}
+		return ret
+	}
+	return 0
+}
+
+func endpointSupportsVersion(ctx context.Context, client *gophercloud.ProviderClient, serviceType, endpointURL string, expectedVersion int) (bool, error) {
+	// Swift doesn't support version discovery :(
+	if expectedVersion == 0 || serviceType == "object-store" {
+		return true, nil
+	}
+
+	// Repeating verbatim from keystoneauth1 [1]:
+	//
+	// > The sins of our fathers become the blood on our hands.
+	// > If a user requests an old-style service type such as volumev2, then they
+	// > are inherently requesting the major API version 2. It's not a good
+	// > interface, but it's the one that was imposed on the world years ago
+	// > because the client libraries all hid the version discovery document.
+	// > In order to be able to ensure that a user who requests volumev2 does not
+	// > get a block-storage endpoint that only provides v3 of the block-storage
+	// > service, we need to pull the version out of the service_type. The
+	// > service-types-authority will prevent the growth of new monstrosities such
+	// > as this, but in order to move forward without breaking people, we have
+	// > to just cry in the corner while striking ourselves with thorned branches.
+	// > That said, for sure only do this hack for officially known service_types.
+	//
+	// So yeah, what mordred said.
+	//
+	// https://github.com/openstack/keystoneauth/blob/5.10.0/keystoneauth1/discover.py#L270-L290
+	impliedVersion := extractServiceTypeVersion(serviceType)
+	if impliedVersion != 0 && impliedVersion != expectedVersion {
+		return false, nil
+	}
+
+	// NOTE(stephenfin) In addition to the above, keystoneauth also supports a URL
+	// hack whereby it will extract the version from the URL. We may wish to
+	// implement this too.
+
+	endpointURL, err := utils.BaseVersionedEndpoint(endpointURL)
+	if err != nil {
+		return false, err
+	}
+
+	supportedVersions, err := utils.GetServiceVersions(ctx, client, endpointURL, false)
+	if err != nil {
+		return false, err
+	}
+
+	for _, supportedVersion := range supportedVersions {
+		if supportedVersion.Major == expectedVersion {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+/*
+V2Endpoint discovers the endpoint URL for a specific service from a
+ServiceCatalog acquired during the v2 identity service.
+
+The specified EndpointOpts are used to identify a unique, unambiguous endpoint
+to return. It's an error both when multiple endpoints match the provided
+criteria and when none do. The minimum that can be specified is a Type, but you
+will also often need to specify a Name and/or a Region depending on what's
+available on your OpenStack deployment.
+*/
+func V2Endpoint(ctx context.Context, client *gophercloud.ProviderClient, catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) {
+	// Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided.
+	//
+	// If multiple endpoints are found, we return the first result and disregard the rest.
+	// This behavior matches the Python library. See GH-1764.
+	for _, entry := range catalog.Entries {
+		if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) {
+			for _, endpoint := range entry.Endpoints {
+				if opts.Region != "" && endpoint.Region != opts.Region {
+					continue
+				}
+
+				var endpointURL string
+				switch opts.Availability {
+				case gophercloud.AvailabilityPublic:
+					endpointURL = gophercloud.NormalizeURL(endpoint.PublicURL)
+				case gophercloud.AvailabilityInternal:
+					endpointURL = gophercloud.NormalizeURL(endpoint.InternalURL)
+				case gophercloud.AvailabilityAdmin:
+					endpointURL = gophercloud.NormalizeURL(endpoint.AdminURL)
+				default:
+					err := &ErrInvalidAvailabilityProvided{}
+					err.Argument = "Availability"
+					err.Value = opts.Availability
+					return "", err
+				}
+
+				endpointSupportsVersion, err := endpointSupportsVersion(ctx, client, entry.Type, endpointURL, opts.Version)
+				if err != nil {
+					return "", err
+				}
+				if !endpointSupportsVersion {
+					continue
+				}
+
+				return endpointURL, nil
+			}
+		}
+	}
+
+	// Report an error if there were no matching endpoints.
+	err := &gophercloud.ErrEndpointNotFound{}
+	return "", err
+}
+
+/*
+V3Endpoint discovers the endpoint URL for a specific service from a Catalog
+acquired during the v3 identity service.
+
+The specified EndpointOpts are used to identify a unique, unambiguous endpoint
+to return. It's an error both when multiple endpoints match the provided
+criteria and when none do. The minimum that can be specified is a Type, but you
+will also often need to specify a Name and/or a Region depending on what's
+available on your OpenStack deployment.
+*/
+func V3Endpoint(ctx context.Context, client *gophercloud.ProviderClient, catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) {
+	if opts.Availability != gophercloud.AvailabilityAdmin &&
+		opts.Availability != gophercloud.AvailabilityPublic &&
+		opts.Availability != gophercloud.AvailabilityInternal {
+		err := &ErrInvalidAvailabilityProvided{}
+		err.Argument = "Availability"
+		err.Value = opts.Availability
+		return "", err
+	}
+
+	// Extract Endpoints from the catalog entries that match the requested Type, Interface,
+	// Name if provided, and Region if provided.
+	//
+	// If multiple endpoints are found, we return the first result and disregard the rest.
+	// This behavior matches the Python library. See GH-1764.
+	for _, entry := range catalog.Entries {
+		if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) {
+			for _, endpoint := range entry.Endpoints {
+				if opts.Availability != gophercloud.Availability(endpoint.Interface) {
+					continue
+				}
+				if opts.Region != "" && endpoint.Region != opts.Region && endpoint.RegionID != opts.Region {
+					continue
+				}
+
+				endpointURL := gophercloud.NormalizeURL(endpoint.URL)
+
+				endpointSupportsVersion, err := endpointSupportsVersion(ctx, client, entry.Type, endpointURL, opts.Version)
+				if err != nil {
+					return "", err
+				}
+				if !endpointSupportsVersion {
+					continue
+				}
+
+				return endpointURL, nil
+			}
+		}
+	}
+
+	// Report an error if there were no matching endpoints.
+	err := &gophercloud.ErrEndpointNotFound{}
+	return "", err
+}
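
A standalone re-creation of the versioned-alias rule described in the long comment above; legacy service types such as "volumev2" imply a major version that discovery must honor:

	package main

	import (
		"fmt"
		"regexp"
		"strconv"
	)

	// versionedAlias mirrors the vendored pattern: a service type ending in
	// "v" plus a single digit implies that major version.
	var versionedAlias = regexp.MustCompile(`^.*v(\d)$`)

	func impliedVersion(serviceType string) int {
		m := versionedAlias.FindStringSubmatch(serviceType)
		if m == nil {
			return 0
		}
		v, _ := strconv.Atoi(m[1]) // a single captured digit; cannot fail
		return v
	}

	func main() {
		fmt.Println(impliedVersion("volumev2"))      // 2
		fmt.Println(impliedVersion("block-storage")) // 0, no implied version
	}
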
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go
index 14cff0d755..573c1f06f4 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go
@@ -8,6 +8,8 @@ import (
 	tokens3 "github.com/gophercloud/gophercloud/v2/openstack/identity/v3/tokens"
 )
 
+// TODO(stephenfin): Remove this module in v3. The functions below are no longer used.
+
 /*
 V2EndpointURL discovers the endpoint URL for a specific service from a
 ServiceCatalog acquired during the v2 identity service.
@@ -20,39 +22,33 @@ available on your OpenStack deployment.
 */
 func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) {
 	// Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided.
-	var endpoints = make([]tokens2.Endpoint, 0, 1)
+	//
+	// If multiple endpoints are found, we return the first result and disregard the rest.
+	// This behavior matches the Python library. See GH-1764.
 	for _, entry := range catalog.Entries {
 		if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) {
 			for _, endpoint := range entry.Endpoints {
-				if opts.Region == "" || endpoint.Region == opts.Region {
-					endpoints = append(endpoints, endpoint)
+				if opts.Region != "" && endpoint.Region != opts.Region {
+					continue
 				}
-			}
-		}
-	}
 
-	// If multiple endpoints were found, use the first result
-	// and disregard the other endpoints.
-	//
-	// This behavior matches the Python library. See GH-1764.
-	if len(endpoints) > 1 {
-		endpoints = endpoints[0:1]
-	}
+				var endpointURL string
+				switch opts.Availability {
+				case gophercloud.AvailabilityPublic:
+					endpointURL = gophercloud.NormalizeURL(endpoint.PublicURL)
+				case gophercloud.AvailabilityInternal:
+					endpointURL = gophercloud.NormalizeURL(endpoint.InternalURL)
+				case gophercloud.AvailabilityAdmin:
+					endpointURL = gophercloud.NormalizeURL(endpoint.AdminURL)
+				default:
+					err := &ErrInvalidAvailabilityProvided{}
+					err.Argument = "Availability"
+					err.Value = opts.Availability
+					return "", err
+				}
 
-	// Extract the appropriate URL from the matching Endpoint.
-	for _, endpoint := range endpoints {
-		switch opts.Availability {
-		case gophercloud.AvailabilityPublic:
-			return gophercloud.NormalizeURL(endpoint.PublicURL), nil
-		case gophercloud.AvailabilityInternal:
-			return gophercloud.NormalizeURL(endpoint.InternalURL), nil
-		case gophercloud.AvailabilityAdmin:
-			return gophercloud.NormalizeURL(endpoint.AdminURL), nil
-		default:
-			err := &ErrInvalidAvailabilityProvided{}
-			err.Argument = "Availability"
-			err.Value = opts.Availability
-			return "", err
+				return endpointURL, nil
+			}
 		}
 	}
 
@@ -72,41 +68,35 @@ will also often need to specify a Name and/or a Region depending on what's
 available on your OpenStack deployment.
 */
 func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) {
+	if opts.Availability != gophercloud.AvailabilityAdmin &&
+		opts.Availability != gophercloud.AvailabilityPublic &&
+		opts.Availability != gophercloud.AvailabilityInternal {
+		err := &ErrInvalidAvailabilityProvided{}
+		err.Argument = "Availability"
+		err.Value = opts.Availability
+		return "", err
+	}
+
 	// Extract Endpoints from the catalog entries that match the requested Type, Interface,
 	// Name if provided, and Region if provided.
-	var endpoints = make([]tokens3.Endpoint, 0, 1)
+	//
+	// If multiple endpoints are found, we return the first result and disregard the rest.
+	// This behavior matches the Python library. See GH-1764.
 	for _, entry := range catalog.Entries {
 		if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) {
 			for _, endpoint := range entry.Endpoints {
-				if opts.Availability != gophercloud.AvailabilityAdmin &&
-					opts.Availability != gophercloud.AvailabilityPublic &&
-					opts.Availability != gophercloud.AvailabilityInternal {
-					err := &ErrInvalidAvailabilityProvided{}
-					err.Argument = "Availability"
-					err.Value = opts.Availability
-					return "", err
+				if opts.Availability != gophercloud.Availability(endpoint.Interface) {
+					continue
 				}
-				if (opts.Availability == gophercloud.Availability(endpoint.Interface)) &&
-					(opts.Region == "" || endpoint.Region == opts.Region || endpoint.RegionID == opts.Region) {
-					endpoints = append(endpoints, endpoint)
+				if opts.Region != "" && endpoint.Region != opts.Region && endpoint.RegionID != opts.Region {
+					continue
 				}
+
+				return gophercloud.NormalizeURL(endpoint.URL), nil
 			}
 		}
 	}
 
-	// If multiple endpoints were found, use the first result
-	// and disregard the other endpoints.
-	//
-	// This behavior matches the Python library. See GH-1764.
-	if len(endpoints) > 1 {
-		endpoints = endpoints[0:1]
-	}
-
-	// Extract the URL from the matching Endpoint.
-	for _, endpoint := range endpoints {
-		return gophercloud.NormalizeURL(endpoint.URL), nil
-	}
-
 	// Report an error if there were no matching endpoints.
 	err := &gophercloud.ErrEndpointNotFound{}
 	return "", err
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants/requests.go
index a08980df2c..84a8b9df1d 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants/requests.go
@@ -7,6 +7,12 @@ import (
 	"github.com/gophercloud/gophercloud/v2/pagination"
 )
 
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToTenantListQuery() (string, error)
+}
+
 // ListOpts filters the Tenants that are returned by the List call.
 type ListOpts struct {
 	// Marker is the ID of the last Tenant on the previous page.
@@ -16,15 +22,21 @@ type ListOpts struct {
 	Limit int `q:"limit"`
 }
 
+// ToTenantListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToTenantListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
 // List enumerates the Tenants to which the current token has access.
-func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager {
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
 	url := listURL(client)
 	if opts != nil {
-		q, err := gophercloud.BuildQueryString(opts)
+		query, err := opts.ToTenantListQuery()
 		if err != nil {
 			return pagination.Pager{Err: err}
 		}
-		url += q.String()
+		url += query
 	}
 	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
 		return TenantPage{pagination.LinkedPageBase{PageResult: r}}
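
After this change List accepts anything implementing ToTenantListQuery, and the stock ListOpts can now be passed by value rather than by pointer. A sketch of paging through tenants under the new signature:

	package example

	import (
		"context"
		"log"

		"github.com/gophercloud/gophercloud/v2"
		"github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants"
		"github.com/gophercloud/gophercloud/v2/pagination"
	)

	func listTenants(ctx context.Context, client *gophercloud.ServiceClient) error {
		// ListOpts satisfies the new ListOptsBuilder; opts may also be nil
		// for an unfiltered listing.
		pager := tenants.List(client, tenants.ListOpts{Limit: 50})
		return pager.EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) {
			ts, err := tenants.ExtractTenants(page)
			if err != nil {
				return false, err
			}
			for _, t := range ts {
				log.Println(t.ID, t.Name)
			}
			return true, nil
		})
	}
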
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/ec2tokens/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/ec2tokens/requests.go
index 5b1f3d6882..1d4cb54928 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/ec2tokens/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/ec2tokens/requests.go
@@ -300,8 +300,7 @@ func Create(ctx context.Context, c *gophercloud.ServiceClient, opts tokens.AuthO
 	deleteBodyElements(b, "token")
 
 	resp, err := c.Post(ctx, ec2tokensURL(c), b, &r.Body, &gophercloud.RequestOpts{
-		MoreHeaders: map[string]string{"X-Auth-Token": ""},
-		OkCodes:     []int{200},
+		OkCodes: []int{200},
 	})
 	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
 	return
@@ -320,8 +319,7 @@ func ValidateS3Token(ctx context.Context, c *gophercloud.ServiceClient, opts tok
 	deleteBodyElements(b, "body_hash", "headers", "host", "params", "path", "verb")
 
 	resp, err := c.Post(ctx, s3tokensURL(c), b, &r.Body, &gophercloud.RequestOpts{
-		MoreHeaders: map[string]string{"X-Auth-Token": ""},
-		OkCodes:     []int{200},
+		OkCodes: []int{200},
 	})
 	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
 	return
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/oauth1/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/oauth1/requests.go
index 8c66b36e20..0b23269ffa 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/oauth1/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/oauth1/requests.go
@@ -214,6 +214,12 @@ func GetConsumer(ctx context.Context, client *gophercloud.ServiceClient, id stri
 	return
 }
 
+// UpdateConsumerOptsBuilder allows extensions to add additional parameters to the
+// UpdateConsumer request.
+type UpdateConsumerOptsBuilder interface {
+	ToOAuth1UpdateConsumerMap() (map[string]any, error)
+}
+
 // UpdateConsumerOpts provides options used to update a consumer.
 type UpdateConsumerOpts struct {
 	// Description is the consumer description.
@@ -227,7 +233,7 @@ func (opts UpdateConsumerOpts) ToOAuth1UpdateConsumerMap() (map[string]any, erro
 }
 
 // UpdateConsumer updates an existing Consumer.
-func UpdateConsumer(ctx context.Context, client *gophercloud.ServiceClient, id string, opts UpdateConsumerOpts) (r UpdateConsumerResult) {
+func UpdateConsumer(ctx context.Context, client *gophercloud.ServiceClient, id string, opts UpdateConsumerOptsBuilder) (r UpdateConsumerResult) {
 	b, err := opts.ToOAuth1UpdateConsumerMap()
 	if err != nil {
 		r.Err = err
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/image/v2/images/types.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/image/v2/images/types.go
index 147be19927..eedc13a330 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/image/v2/images/types.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/image/v2/images/types.go
@@ -13,10 +13,14 @@ const (
 	// been reserved for an image in the image registry.
 	ImageStatusQueued ImageStatus = "queued"
 
-	// ImageStatusSaving denotes that an image’s raw data is currently being
+	// ImageStatusSaving denotes that an image's raw data is currently being
 	// uploaded to Glance
 	ImageStatusSaving ImageStatus = "saving"
 
+	// ImageStatusUploading denotes that an image's raw data is currently being
+	// uploaded to Glance through the upload process
+	ImageStatusUploading ImageStatus = "uploading"
+
 	// ImageStatusActive denotes an image that is fully available in Glance.
 	ImageStatusActive ImageStatus = "active"
 
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/flavors/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/flavors/requests.go
index 710a6edf5b..67196a5202 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/flavors/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/flavors/requests.go
@@ -127,7 +127,7 @@ func (opts UpdateOpts) ToFlavorUpdateMap() (map[string]any, error) {
 
 // Update is an operation which modifies the attributes of the specified
 // Flavor.
-func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) {
+func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
 	b, err := opts.ToFlavorUpdateMap()
 	if err != nil {
 		r.Err = err
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/l7policies/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/l7policies/requests.go
index 62a4f179ee..ab0b22c6bc 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/l7policies/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/l7policies/requests.go
@@ -263,6 +263,12 @@ func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts U
 	return
 }
 
+// CreateRuleOptsBuilder allows extensions to add additional parameters to the
+// CreateRule request.
+type CreateRuleOptsBuilder interface {
+	ToRuleCreateMap() (map[string]any, error)
+}
+
 // CreateRuleOpts is the common options struct used in this package's CreateRule
 // operation.
 type CreateRuleOpts struct {
@@ -300,7 +306,7 @@ func (opts CreateRuleOpts) ToRuleCreateMap() (map[string]any, error) {
 }
 
 // CreateRule will create and associate a Rule with a particular L7Policy.
-func CreateRule(ctx context.Context, c *gophercloud.ServiceClient, policyID string, opts CreateRuleOpts) (r CreateRuleResult) {
+func CreateRule(ctx context.Context, c *gophercloud.ServiceClient, policyID string, opts CreateRuleOptsBuilder) (r CreateRuleResult) {
 	b, err := opts.ToRuleCreateMap()
 	if err != nil {
 		r.Err = err
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners/requests.go
index 3216fbddd0..abd5d08970 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners/requests.go
@@ -380,7 +380,7 @@ func (opts UpdateOpts) ToListenerUpdateMap() (map[string]any, error) {
 
 // Update is an operation which modifies the attributes of the specified
 // Listener.
-func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) {
+func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
 	b, err := opts.ToListenerUpdateMap()
 	if err != nil {
 		r.Err = err
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers/requests.go
index f815806f39..095170edd3 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers/requests.go
@@ -208,7 +208,7 @@ func (opts UpdateOpts) ToLoadBalancerUpdateMap() (map[string]any, error) {
 
 // Update is an operation which modifies the attributes of the specified
 // LoadBalancer.
-func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) {
+func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
 	b, err := opts.ToLoadBalancerUpdateMap()
 	if err != nil {
 		r.Err = err
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/requests.go
index be5701c5f4..15a503badc 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/requests.go
@@ -2,6 +2,7 @@ package monitors
 
 import (
 	"context"
+	"strconv"
 
 	"github.com/gophercloud/gophercloud/v2"
 	"github.com/gophercloud/gophercloud/v2/pagination"
@@ -153,7 +154,25 @@ type CreateOpts struct {
 
 // ToMonitorCreateMap builds a request body from CreateOpts.
 func (opts CreateOpts) ToMonitorCreateMap() (map[string]any, error) {
-	return gophercloud.BuildRequestBody(opts, "healthmonitor")
+	b, err := gophercloud.BuildRequestBody(opts, "healthmonitor")
+	if err != nil {
+		return nil, err
+	}
+
+	if v, ok := b["healthmonitor"]; ok {
+		if m, ok := v.(map[string]any); ok {
+			if v, ok := m["http_version"]; ok {
+				if v, ok := v.(string); ok {
+					m["http_version"], err = strconv.ParseFloat(v, 64)
+					if err != nil {
+						return nil, err
+					}
+				}
+			}
+		}
+	}
+
+	return b, nil
 }
 
 /*
@@ -247,7 +266,25 @@ type UpdateOpts struct {
 
 // ToMonitorUpdateMap builds a request body from UpdateOpts.
 func (opts UpdateOpts) ToMonitorUpdateMap() (map[string]any, error) {
-	return gophercloud.BuildRequestBody(opts, "healthmonitor")
+	b, err := gophercloud.BuildRequestBody(opts, "healthmonitor")
+	if err != nil {
+		return nil, err
+	}
+
+	if v, ok := b["healthmonitor"]; ok {
+		if m, ok := v.(map[string]any); ok {
+			if v, ok := m["http_version"]; ok {
+				if v, ok := v.(string); ok {
+					m["http_version"], err = strconv.ParseFloat(v, 64)
+					if err != nil {
+						return nil, err
+					}
+				}
+			}
+		}
+	}
+
+	return b, nil
 }
 
 // Update is an operation which modifies the attributes of the specified
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/results.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/results.go
index 644ef18700..6e8563faaa 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/results.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/results.go
@@ -1,6 +1,9 @@
 package monitors
 
 import (
+	"encoding/json"
+	"strconv"
+
 	"github.com/gophercloud/gophercloud/v2"
 	"github.com/gophercloud/gophercloud/v2/pagination"
 )
@@ -61,7 +64,7 @@ type Monitor struct {
 	HTTPMethod string `json:"http_method"`
 
 	// The HTTP version that the monitor uses for requests.
-	HTTPVersion string `json:"http_version"`
+	HTTPVersion string `json:"-"`
 
 	// The HTTP path of the request sent by the monitor to test the health of a
 	// member. Must be a string beginning with a forward slash (/).
@@ -96,6 +99,26 @@ type Monitor struct {
 	Tags []string `json:"tags"`
 }
 
+func (r *Monitor) UnmarshalJSON(b []byte) error {
+	type tmp Monitor
+	var s struct {
+		tmp
+		HTTPVersion float64 `json:"http_version"`
+	}
+
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = Monitor(s.tmp)
+	if s.HTTPVersion != 0 {
+		r.HTTPVersion = strconv.FormatFloat(s.HTTPVersion, 'f', 1, 64)
+	}
+
+	return nil
+}
+
 // MonitorPage is the page returned by a pager when traversing over a
 // collection of health monitors.
 type MonitorPage struct {
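The effect of the `http_version` coercion can be seen by building a request body directly. A sketch (the pool ID is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors"
)

func main() {
	opts := monitors.CreateOpts{
		PoolID:      "d67d56a6-4a86-4688-a282-f46444705c64", // placeholder pool ID
		Type:        "HTTP",
		Delay:       5,
		Timeout:     3,
		MaxRetries:  2,
		HTTPVersion: "1.1",
	}

	b, err := opts.ToMonitorCreateMap()
	if err != nil {
		panic(err)
	}

	// The string "1.1" is coerced to the float64 1.1 that the Octavia API
	// expects on the wire; the reverse mapping happens in the Monitor
	// UnmarshalJSON shown below.
	hm := b["healthmonitor"].(map[string]any)
	fmt.Printf("%T %v\n", hm["http_version"], hm["http_version"])
}
```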
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/constants.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/constants.go
new file mode 100644
index 0000000000..85dff7818c
--- /dev/null
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/constants.go
@@ -0,0 +1,7 @@
+package floatingips
+
+const (
+	StatusActive = "ACTIVE"
+	StatusDown   = "DOWN"
+	StatusError  = "ERROR"
+)
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/requests.go
index f6ca654841..def4699db3 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/requests.go
@@ -8,6 +8,12 @@ import (
 	"github.com/gophercloud/gophercloud/v2/pagination"
 )
 
+// ListOptsBuilder allows extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface {
+	ToRouterListQuery() (string, error)
+}
+
 // ListOpts allows the filtering and sorting of paginated collections through
 // the API. Filtering is achieved by passing in struct field values that map to
 // the floating IP attributes you want to see returned. SortKey allows you to
@@ -33,19 +39,31 @@ type ListOpts struct {
 	RevisionNumber *int   `q:"revision_number"`
 }
 
+// ToRouterListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToRouterListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
+}
+
 // List returns a Pager which allows you to iterate over a collection of
 // routers. It accepts a ListOpts struct, which allows you to filter and sort
 // the returned collection for greater efficiency.
 //
 // Default policy settings return only those routers that are owned by the
 // tenant who submits the request, unless an admin user submits the request.
-func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
-	q, err := gophercloud.BuildQueryString(&opts)
-	if err != nil {
-		return pagination.Pager{Err: err}
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := rootURL(c)
+	if opts != nil {
+		query, err := opts.ToRouterListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
 	}
-	u := rootURL(c) + q.String()
-	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
 		return RouterPage{pagination.LinkedPageBase{PageResult: r}}
 	})
 }
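Call sites are unaffected by the new `ListOptsBuilder` parameter: a `ListOpts` value still works, and `nil` now means an unfiltered listing. A sketch, assuming an authenticated Neutron client:

```go
package main

import (
	"context"
	"fmt"

	"github.com/gophercloud/gophercloud/v2"
	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers"
	"github.com/gophercloud/gophercloud/v2/pagination"
)

func listRouters(ctx context.Context, client *gophercloud.ServiceClient) error {
	// A plain ListOpts satisfies ListOptsBuilder; passing nil lists everything.
	return routers.List(client, routers.ListOpts{Name: "my-router"}).EachPage(ctx,
		func(_ context.Context, page pagination.Page) (bool, error) {
			rs, err := routers.ExtractRouters(page)
			if err != nil {
				return false, err
			}
			for _, r := range rs {
				fmt.Println(r.ID, r.Status)
			}
			return true, nil
		})
}
```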
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/results.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/results.go
index d75615b773..d657160ba2 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/results.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/results.go
@@ -21,7 +21,7 @@ type GatewayInfo struct {
 // router.
 type ExternalFixedIP struct {
 	IPAddress string `json:"ip_address,omitempty"`
-	SubnetID  string `json:"subnet_id"`
+	SubnetID  string `json:"subnet_id,omitempty"`
 }
 
 // Route is a possible route in a router.
@@ -82,10 +82,48 @@ type Router struct {
 	RevisionNumber int `json:"revision_number"`
 
 	// Timestamp when the router was created
-	CreatedAt time.Time `json:"created_at"`
+	CreatedAt time.Time `json:"-"`
 
 	// Timestamp when the router was last updated
-	UpdatedAt time.Time `json:"updated_at"`
+	UpdatedAt time.Time `json:"-"`
+}
+
+func (r *Router) UnmarshalJSON(b []byte) error {
+	type tmp Router
+
+	// Support for older neutron time format
+	var s1 struct {
+		tmp
+		CreatedAt gophercloud.JSONRFC3339NoZ `json:"created_at"`
+		UpdatedAt gophercloud.JSONRFC3339NoZ `json:"updated_at"`
+	}
+
+	err := json.Unmarshal(b, &s1)
+	if err == nil {
+		*r = Router(s1.tmp)
+		r.CreatedAt = time.Time(s1.CreatedAt)
+		r.UpdatedAt = time.Time(s1.UpdatedAt)
+
+		return nil
+	}
+
+	// Support for newer neutron time format
+	var s2 struct {
+		tmp
+		CreatedAt time.Time `json:"created_at"`
+		UpdatedAt time.Time `json:"updated_at"`
+	}
+
+	err = json.Unmarshal(b, &s2)
+	if err != nil {
+		return err
+	}
+
+	*r = Router(s2.tmp)
+	r.CreatedAt = time.Time(s2.CreatedAt)
+	r.UpdatedAt = time.Time(s2.UpdatedAt)
+
+	return nil
 }
 
 // RouterPage is the page returned by a pager when traversing over a
@@ -122,11 +160,14 @@ func (r RouterPage) IsEmpty() (bool, error) {
 // and extracts the elements into a slice of Router structs. In other words,
 // a generic collection is mapped into a relevant slice.
 func ExtractRouters(r pagination.Page) ([]Router, error) {
-	var s struct {
-		Routers []Router `json:"routers"`
-	}
-	err := (r.(RouterPage)).ExtractInto(&s)
-	return s.Routers, err
+	var s []Router
+	err := ExtractRoutersInto(r, &s)
+	return s, err
+}
+
+// ExtractRoutersInto extracts the elements into a slice of Router structs.
+func ExtractRoutersInto(r pagination.Page, v any) error {
+	return r.(RouterPage).Result.ExtractIntoSlicePtr(v, "routers")
 }
 
 type commonResult struct {
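The two-pass `UnmarshalJSON` accepts both neutron timestamp dialects. A self-contained sketch of the behaviour:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers"
)

func main() {
	// Older neutron omits the timezone; newer neutron emits RFC 3339 with one.
	// Both payloads decode into the same Router value.
	for _, payload := range []string{
		`{"id": "r1", "created_at": "2019-06-30T04:15:37"}`,
		`{"id": "r1", "created_at": "2019-06-30T04:15:37Z"}`,
	} {
		var r routers.Router
		if err := json.Unmarshal([]byte(payload), &r); err != nil {
			panic(err)
		}
		fmt.Println(r.ID, r.CreatedAt.UTC())
	}
}
```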
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/requests.go
index 77768a3dac..edd253f037 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/requests.go
@@ -7,41 +7,60 @@ import (
 	"github.com/gophercloud/gophercloud/v2/pagination"
 )
 
+// ListOptsBuilder allows extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface {
+	ToSecGroupListQuery() (string, error)
+}
+
 // ListOpts allows the filtering and sorting of paginated collections through
 // the API. Filtering is achieved by passing in struct field values that map to
 // the security group rule attributes you want to see returned. SortKey allows
 // you to sort by a particular network attribute. SortDir sets the direction,
 // and is either `asc' or `desc'. Marker and Limit are used for pagination.
 type ListOpts struct {
-	Direction      string `q:"direction"`
-	EtherType      string `q:"ethertype"`
-	ID             string `q:"id"`
-	Description    string `q:"description"`
-	PortRangeMax   int    `q:"port_range_max"`
-	PortRangeMin   int    `q:"port_range_min"`
-	Protocol       string `q:"protocol"`
-	RemoteGroupID  string `q:"remote_group_id"`
-	RemoteIPPrefix string `q:"remote_ip_prefix"`
-	SecGroupID     string `q:"security_group_id"`
-	TenantID       string `q:"tenant_id"`
-	ProjectID      string `q:"project_id"`
-	Limit          int    `q:"limit"`
-	Marker         string `q:"marker"`
-	SortKey        string `q:"sort_key"`
-	SortDir        string `q:"sort_dir"`
-	RevisionNumber *int   `q:"revision_number"`
+	Direction            string `q:"direction"`
+	EtherType            string `q:"ethertype"`
+	ID                   string `q:"id"`
+	Description          string `q:"description"`
+	PortRangeMax         int    `q:"port_range_max"`
+	PortRangeMin         int    `q:"port_range_min"`
+	Protocol             string `q:"protocol"`
+	RemoteAddressGroupID string `q:"remote_address_group_id"`
+	RemoteGroupID        string `q:"remote_group_id"`
+	RemoteIPPrefix       string `q:"remote_ip_prefix"`
+	SecGroupID           string `q:"security_group_id"`
+	TenantID             string `q:"tenant_id"`
+	ProjectID            string `q:"project_id"`
+	Limit                int    `q:"limit"`
+	Marker               string `q:"marker"`
+	SortKey              string `q:"sort_key"`
+	SortDir              string `q:"sort_dir"`
+	RevisionNumber       *int   `q:"revision_number"`
+}
+
+// ToSecGroupListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToSecGroupListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
 }
 
 // List returns a Pager which allows you to iterate over a collection of
 // security group rules. It accepts a ListOpts struct, which allows you to filter
 // and sort the returned collection for greater efficiency.
-func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
-	q, err := gophercloud.BuildQueryString(&opts)
-	if err != nil {
-		return pagination.Pager{Err: err}
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := rootURL(c)
+	if opts != nil {
+		query, err := opts.ToSecGroupListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
 	}
-	u := rootURL(c) + q.String()
-	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
 		return SecGroupRulePage{pagination.LinkedPageBase{PageResult: r}}
 	})
 }
@@ -106,7 +125,7 @@ type CreateOpts struct {
 
 	// The maximum port number in the range that is matched by the security group
 	// rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If
-	// the protocol is ICMP, this value must be an ICMP type.
+	// the protocol is ICMP, this value must be an ICMP code.
 	PortRangeMax int `json:"port_range_max,omitempty"`
 
 	// The minimum port number in the range that is matched by the security group
@@ -119,12 +138,16 @@ type CreateOpts struct {
 	// "tcp", "udp", "icmp" or an empty string.
 	Protocol RuleProtocol `json:"protocol,omitempty"`
 
+	// The remote address group ID to be associated with this security group rule.
+	// You can specify either RemoteAddressGroupID, RemoteGroupID, or RemoteIPPrefix.
+	RemoteAddressGroupID string `json:"remote_address_group_id,omitempty"`
+
 	// The remote group ID to be associated with this security group rule. You can
-	// specify either RemoteGroupID or RemoteIPPrefix.
+	// specify either RemoteAddressGroupID, RemoteGroupID, or RemoteIPPrefix.
 	RemoteGroupID string `json:"remote_group_id,omitempty"`
 
 	// The remote IP prefix to be associated with this security group rule. You can
-	// specify either RemoteGroupID or RemoteIPPrefix. This attribute matches the
+	// specify either RemoteAddressGroupID, RemoteGroupID, or RemoteIPPrefix. This attribute matches the
 	// specified IP prefix as the source IP address of the IP packet.
 	RemoteIPPrefix string `json:"remote_ip_prefix,omitempty"`
 
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/results.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/results.go
index 8a3355dfe0..03696ac203 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/results.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/results.go
@@ -1,6 +1,7 @@
 package rules
 
 import (
+	"encoding/json"
 	"time"
 
 	"github.com/gophercloud/gophercloud/v2"
@@ -44,6 +45,10 @@ type SecGroupRule struct {
 	// "tcp", "udp", "icmp" or an empty string.
 	Protocol string
 
+	// The remote address group ID to be associated with this security group rule.
+	// You can specify either RemoteAddressGroupID, RemoteGroupID, or RemoteIPPrefix.
+	RemoteAddressGroupID string `json:"remote_address_group_id"`
+
 	// The remote group ID to be associated with this security group rule. You
 	// can specify either RemoteGroupID or RemoteIPPrefix.
 	RemoteGroupID string `json:"remote_group_id"`
@@ -63,10 +68,48 @@ type SecGroupRule struct {
 	RevisionNumber int `json:"revision_number"`
 
 	// Timestamp when the rule was created
-	CreatedAt time.Time `json:"created_at"`
+	CreatedAt time.Time `json:"-"`
 
 	// Timestamp when the rule was last updated
-	UpdatedAt time.Time `json:"updated_at"`
+	UpdatedAt time.Time `json:"-"`
+}
+
+func (r *SecGroupRule) UnmarshalJSON(b []byte) error {
+	type tmp SecGroupRule
+
+	// Support for older neutron time format
+	var s1 struct {
+		tmp
+		CreatedAt gophercloud.JSONRFC3339NoZ `json:"created_at"`
+		UpdatedAt gophercloud.JSONRFC3339NoZ `json:"updated_at"`
+	}
+
+	err := json.Unmarshal(b, &s1)
+	if err == nil {
+		*r = SecGroupRule(s1.tmp)
+		r.CreatedAt = time.Time(s1.CreatedAt)
+		r.UpdatedAt = time.Time(s1.UpdatedAt)
+
+		return nil
+	}
+
+	// Support for newer neutron time format
+	var s2 struct {
+		tmp
+		CreatedAt time.Time `json:"created_at"`
+		UpdatedAt time.Time `json:"updated_at"`
+	}
+
+	err = json.Unmarshal(b, &s2)
+	if err != nil {
+		return err
+	}
+
+	*r = SecGroupRule(s2.tmp)
+	r.CreatedAt = time.Time(s2.CreatedAt)
+	r.UpdatedAt = time.Time(s2.UpdatedAt)
+
+	return nil
 }
 
 // SecGroupRulePage is the page returned by a pager when traversing over a
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks/constants.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks/constants.go
new file mode 100644
index 0000000000..6bec77fa79
--- /dev/null
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks/constants.go
@@ -0,0 +1,9 @@
+package trunks
+
+const (
+	StatusActive   = "ACTIVE"
+	StatusBuild    = "BUILD"
+	StatusDegraded = "DEGRADED"
+	StatusDown     = "DOWN"
+	StatusError    = "ERROR"
+)
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks/constants.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks/constants.go
new file mode 100644
index 0000000000..1214ce9deb
--- /dev/null
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks/constants.go
@@ -0,0 +1,8 @@
+package networks
+
+const (
+	StatusActive = "ACTIVE"
+	StatusBuild  = "BUILD"
+	StatusDown   = "DOWN"
+	StatusError  = "ERROR"
+)
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/constants.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/constants.go
new file mode 100644
index 0000000000..6275839bf4
--- /dev/null
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/constants.go
@@ -0,0 +1,8 @@
+package ports
+
+const (
+	StatusActive = "ACTIVE"
+	StatusBuild  = "BUILD"
+	StatusDown   = "DOWN"
+	StatusError  = "ERROR"
+)
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go
index 74a0fa3b49..db223d48c1 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go
@@ -49,7 +49,7 @@ type DeleteResult struct {
 
 // IP is a sub-struct that represents an individual IP.
 type IP struct {
-	SubnetID  string `json:"subnet_id"`
+	SubnetID  string `json:"subnet_id,omitempty"`
 	IPAddress string `json:"ip_address,omitempty"`
 }
 
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/requests.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/requests.go
index 150afd7394..85c5d2b402 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/requests.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/requests.go
@@ -43,6 +43,7 @@ type ListOpts struct {
 	NotTags           string `q:"not-tags"`
 	NotTagsAny        string `q:"not-tags-any"`
 	RevisionNumber    *int   `q:"revision_number"`
+	SegmentID         string `q:"segment_id"`
 }
 
 // ToSubnetListQuery formats a ListOpts into a query string.
@@ -147,6 +148,10 @@ type CreateOpts struct {
 	// Prefixlen is used when user creates a subnet from the subnetpool. It will
 	// overwrite the "default_prefixlen" value of the referenced subnetpool.
 	Prefixlen int `json:"prefixlen,omitempty"`
+
+	// SegmentID is a network segment the subnet is associated with. It is
+	// available when segment extension is enabled.
+	SegmentID string `json:"segment_id,omitempty"`
 }
 
 // ToSubnetCreateMap builds a request body from CreateOpts.
@@ -194,9 +199,8 @@ type UpdateOpts struct {
 	// AllocationPools are IP Address pools that will be available for DHCP.
 	AllocationPools []AllocationPool `json:"allocation_pools,omitempty"`
 
-	// GatewayIP sets gateway information for the subnet. Setting to nil will
-	// cause a default gateway to automatically be created. Setting to an empty
-	// string will cause the subnet to be created with no gateway. Setting to
+	// GatewayIP sets gateway information for the subnet. Setting to an empty
+	// string will cause the subnet to not have a gateway. Setting to
 	// an explicit address will set that address as the gateway.
 	GatewayIP *string `json:"gateway_ip,omitempty"`
 
@@ -219,6 +223,10 @@ type UpdateOpts struct {
 	// will set revision_number=%s. If the revision number does not match, the
 	// update will fail.
 	RevisionNumber *int `json:"-" h:"If-Match"`
+
+	// SegmentID is a network segment the subnet is associated with. It is
+	// available when segment extension is enabled.
+	SegmentID *string `json:"segment_id,omitempty"`
 }
 
 // ToSubnetUpdateMap builds a request body from UpdateOpts.
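A sketch combining the new `SegmentID` pointer with the clarified `GatewayIP` semantics, assuming the segment extension is enabled:

```go
package main

import (
	"context"

	"github.com/gophercloud/gophercloud/v2"
	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets"
)

func moveSubnetToSegment(ctx context.Context, client *gophercloud.ServiceClient, subnetID, segmentID string) error {
	noGateway := ""
	opts := subnets.UpdateOpts{
		SegmentID: &segmentID, // associate the subnet with the segment
		GatewayIP: &noGateway, // empty string: the subnet keeps no gateway
	}
	_, err := subnets.Update(ctx, client, subnetID, opts).Extract()
	return err
}
```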
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/results.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/results.go
index 01c6acc070..4f0aa8408d 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/results.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/results.go
@@ -1,6 +1,7 @@
 package subnets
 
 import (
+	"encoding/json"
 	"time"
 
 	"github.com/gophercloud/gophercloud/v2"
@@ -124,11 +125,53 @@ type Subnet struct {
 	// RevisionNumber optionally set via extensions/standard-attr-revisions
 	RevisionNumber int `json:"revision_number"`
 
+	// SegmentID of a network segment the subnet is associated with. It is
+	// available when segment extension is enabled.
+	SegmentID string `json:"segment_id"`
+
 	// Timestamp when the subnet was created
-	CreatedAt time.Time `json:"created_at"`
+	CreatedAt time.Time `json:"-"`
 
 	// Timestamp when the subnet was last updated
-	UpdatedAt time.Time `json:"updated_at"`
+	UpdatedAt time.Time `json:"-"`
+}
+
+func (r *Subnet) UnmarshalJSON(b []byte) error {
+	type tmp Subnet
+
+	// Support for older neutron time format
+	var s1 struct {
+		tmp
+		CreatedAt gophercloud.JSONRFC3339NoZ `json:"created_at"`
+		UpdatedAt gophercloud.JSONRFC3339NoZ `json:"updated_at"`
+	}
+
+	err := json.Unmarshal(b, &s1)
+	if err == nil {
+		*r = Subnet(s1.tmp)
+		r.CreatedAt = time.Time(s1.CreatedAt)
+		r.UpdatedAt = time.Time(s1.UpdatedAt)
+
+		return nil
+	}
+
+	// Support for newer neutron time format
+	var s2 struct {
+		tmp
+		CreatedAt time.Time `json:"created_at"`
+		UpdatedAt time.Time `json:"updated_at"`
+	}
+
+	err = json.Unmarshal(b, &s2)
+	if err != nil {
+		return err
+	}
+
+	*r = Subnet(s2.tmp)
+	r.CreatedAt = time.Time(s2.CreatedAt)
+	r.UpdatedAt = time.Time(s2.UpdatedAt)
+
+	return nil
 }
 
 // SubnetPage is the page returned by a pager when traversing over a collection
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/base_endpoint.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/base_endpoint.go
index 40080f7af2..f219c0bf4d 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/base_endpoint.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/base_endpoint.go
@@ -6,9 +6,7 @@ import (
 	"strings"
 )
 
-// BaseEndpoint will return a URL without the /vX.Y
-// portion of the URL.
-func BaseEndpoint(endpoint string) (string, error) {
+func parseEndpoint(endpoint string, includeVersion bool) (string, error) {
 	u, err := url.Parse(endpoint)
 	if err != nil {
 		return "", err
@@ -21,8 +19,23 @@ func BaseEndpoint(endpoint string) (string, error) {
 
 	if version := versionRe.FindString(path); version != "" {
 		versionIndex := strings.Index(path, version)
+		if includeVersion {
+			versionIndex += len(version)
+		}
 		u.Path = path[:versionIndex]
 	}
 
 	return u.String(), nil
 }
+
+// BaseEndpoint will return a URL without the /vX.Y
+// portion of the URL.
+func BaseEndpoint(endpoint string) (string, error) {
+	return parseEndpoint(endpoint, false)
+}
+
+// BaseVersionedEndpoint will return a URL with the /vX.Y portion of the URL,
+// if present, but without a project ID or similar
+func BaseVersionedEndpoint(endpoint string) (string, error) {
+	return parseEndpoint(endpoint, true)
+}
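The difference between the two helpers, illustrated on a compute endpoint (the expected outputs in the comments follow the version-stripping rules above):

```go
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud/v2/openstack/utils"
)

func main() {
	endpoint := "https://compute.example.com:8774/v2.1/0123456789abcdef"

	base, _ := utils.BaseEndpoint(endpoint)
	versioned, _ := utils.BaseVersionedEndpoint(endpoint)

	fmt.Println(base)      // https://compute.example.com:8774/
	fmt.Println(versioned) // https://compute.example.com:8774/v2.1/
}
```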
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go
index 6c720e57ef..ccc56345a6 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go
@@ -3,7 +3,6 @@ package utils
 import (
 	"context"
 	"fmt"
-	"strconv"
 	"strings"
 
 	"github.com/gophercloud/gophercloud/v2"
@@ -29,6 +28,7 @@ var goodStatus = map[string]bool{
 // It returns the highest-Priority Version, OR exact match with client endpoint,
 // among the alternatives that are provided, as well as its corresponding endpoint.
 func ChooseVersion(ctx context.Context, client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) {
+	// TODO(stephenfin): This could be removed since we can accomplish this with GetServiceVersions now.
 	type linkResp struct {
 		Href string `json:"href"`
 		Rel  string `json:"rel"`
@@ -114,123 +114,3 @@ func ChooseVersion(ctx context.Context, client *gophercloud.ProviderClient, reco
 
 	return highest, endpoint, nil
 }
-
-type SupportedMicroversions struct {
-	MaxMajor int
-	MaxMinor int
-	MinMajor int
-	MinMinor int
-}
-
-// GetSupportedMicroversions returns the minimum and maximum microversion that is supported by the ServiceClient Endpoint.
-func GetSupportedMicroversions(ctx context.Context, client *gophercloud.ServiceClient) (SupportedMicroversions, error) {
-	type valueResp struct {
-		ID         string `json:"id"`
-		Status     string `json:"status"`
-		Version    string `json:"version"`
-		MinVersion string `json:"min_version"`
-	}
-
-	type response struct {
-		Version  valueResp   `json:"version"`
-		Versions []valueResp `json:"versions"`
-	}
-	var minVersion, maxVersion string
-	var supportedMicroversions SupportedMicroversions
-	var resp response
-	_, err := client.Get(ctx, client.Endpoint, &resp, &gophercloud.RequestOpts{
-		OkCodes: []int{200, 300},
-	})
-
-	if err != nil {
-		return supportedMicroversions, err
-	}
-
-	if len(resp.Versions) > 0 {
-		// We are dealing with an unversioned endpoint
-		// We only handle the case when there is exactly one, and assume it is the correct one
-		if len(resp.Versions) > 1 {
-			return supportedMicroversions, fmt.Errorf("unversioned endpoint with multiple alternatives not supported")
-		}
-		minVersion = resp.Versions[0].MinVersion
-		maxVersion = resp.Versions[0].Version
-	} else {
-		minVersion = resp.Version.MinVersion
-		maxVersion = resp.Version.Version
-	}
-
-	// Return early if the endpoint does not support microversions
-	if minVersion == "" && maxVersion == "" {
-		return supportedMicroversions, fmt.Errorf("microversions not supported by ServiceClient Endpoint")
-	}
-
-	supportedMicroversions.MinMajor, supportedMicroversions.MinMinor, err = ParseMicroversion(minVersion)
-	if err != nil {
-		return supportedMicroversions, err
-	}
-
-	supportedMicroversions.MaxMajor, supportedMicroversions.MaxMinor, err = ParseMicroversion(maxVersion)
-	if err != nil {
-		return supportedMicroversions, err
-	}
-
-	return supportedMicroversions, nil
-}
-
-// RequireMicroversion checks that the required microversion is supported and
-// returns a ServiceClient with the microversion set.
-func RequireMicroversion(ctx context.Context, client gophercloud.ServiceClient, required string) (gophercloud.ServiceClient, error) {
-	supportedMicroversions, err := GetSupportedMicroversions(ctx, &client)
-	if err != nil {
-		return client, fmt.Errorf("unable to determine supported microversions: %w", err)
-	}
-	supported, err := supportedMicroversions.IsSupported(required)
-	if err != nil {
-		return client, err
-	}
-	if !supported {
-		return client, fmt.Errorf("microversion %s not supported. Supported versions: %v", required, supportedMicroversions)
-	}
-	client.Microversion = required
-	return client, nil
-}
-
-// IsSupported checks if a microversion falls in the supported interval.
-// It returns true if the version is within the interval and false otherwise.
-func (supported SupportedMicroversions) IsSupported(version string) (bool, error) {
-	// Parse the version X.Y into X and Y integers that are easier to compare.
-	vMajor, vMinor, err := ParseMicroversion(version)
-	if err != nil {
-		return false, err
-	}
-
-	// Check that the major version number is supported.
-	if (vMajor < supported.MinMajor) || (vMajor > supported.MaxMajor) {
-		return false, nil
-	}
-
-	// Check that the minor version number is supported
-	if (vMinor <= supported.MaxMinor) && (vMinor >= supported.MinMinor) {
-		return true, nil
-	}
-
-	return false, nil
-}
-
-// ParseMicroversion parses the version major.minor into separate integers major and minor.
-// For example, "2.53" becomes 2 and 53.
-func ParseMicroversion(version string) (major int, minor int, err error) {
-	parts := strings.Split(version, ".")
-	if len(parts) != 2 {
-		return 0, 0, fmt.Errorf("invalid microversion format: %q", version)
-	}
-	major, err = strconv.Atoi(parts[0])
-	if err != nil {
-		return 0, 0, err
-	}
-	minor, err = strconv.Atoi(parts[1])
-	if err != nil {
-		return 0, 0, err
-	}
-	return major, minor, nil
-}
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/discovery.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/discovery.go
new file mode 100644
index 0000000000..86d1d14c34
--- /dev/null
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/discovery.go
@@ -0,0 +1,372 @@
+package utils
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/gophercloud/gophercloud/v2"
+)
+
+type Status string
+
+const (
+	StatusCurrent      Status = "CURRENT"
+	StatusSupported    Status = "SUPPORTED"
+	StatusDeprecated   Status = "DEPRECATED"
+	StatusExperimental Status = "EXPERIMENTAL"
+	StatusUnknown      Status = ""
+)
+
+// SupportedVersion stores a normalized form of the API version data. It handles APIs that
+// support microversions as well as those that do not.
+type SupportedVersion struct {
+	// Major is the major version number of the API
+	Major int
+	// Minor is the minor version number of the API
+	Minor int
+	// Status is the status of the API
+	Status Status
+	SupportedMicroversions
+}
+
+// SupportedMicroversions stores a normalized form of the maximum and minimum API microversions
+// supported by a given service.
+type SupportedMicroversions struct {
+	// MaxMajor is the major version number of the maximum supported API microversion
+	MaxMajor int
+	// MaxMinor is the minor version number of the maximum supported API microversion
+	MaxMinor int
+	// MinMajor is the major version number of the minimum supported API microversion
+	MinMajor int
+	// MinMinor is the minor version number of the minimum supported API microversion
+	MinMinor int
+}
+
+type version struct {
+	ID         string `json:"id"`
+	Status     string `json:"status"`
+	Version    string `json:"version,omitempty"`
+	MaxVersion string `json:"max_version,omitempty"`
+	MinVersion string `json:"min_version"`
+}
+
+type response struct {
+	Versions []version `json:"-"`
+}
+
+func (r *response) UnmarshalJSON(in []byte) error {
+	// intermediateResponse is an intermediate struct that allows us to offload the difference
+	// between a single version document and a multi-version document to the json parser and
+	// only focus on differences in the latter
+	type intermediateResponse struct {
+		ID       string           `json:"id"`
+		Version  *version         `json:"version"`
+		Versions *json.RawMessage `json:"versions"`
+	}
+
+	data := intermediateResponse{}
+	if err := json.Unmarshal(in, &data); err != nil {
+		return err
+	}
+
+	// case 1: we have a single enveloped version object
+	//
+	// this is the approach used by Manila for single version responses
+	if data.Version != nil {
+		r.Versions = []version{*data.Version}
+		return nil
+	}
+
+	// case 2: we have a singly enveloped array of version objects
+	//
+	// this is the approach used by nova, cinder and glance, among others, for multi-version
+	// responses
+	if data.Versions != nil {
+		var versionArr []version
+		if err := json.Unmarshal(*data.Versions, &versionArr); err == nil {
+			r.Versions = versionArr
+			return nil
+		}
+	}
+
+	// case 3: we have a doubly enveloped array of version objects
+	//
+	// this is the approach used by keystone and barbican, among others, for multi-version
+	// responses
+	if data.Versions != nil {
+		type values struct {
+			Values []version `json:"values"`
+		}
+
+		var valuesObj values
+		if err := json.Unmarshal(*data.Versions, &valuesObj); err == nil {
+			r.Versions = valuesObj.Values
+			return nil
+		}
+	}
+
+	// case 4: we have a single unenveloped version object
+	//
+	// this is the approach used by most other services for single version responses
+	if data.ID != "" {
+		r.Versions = []version{{ID: data.ID}}
+		return nil
+	}
+
+	return fmt.Errorf("failed to unmarshal versions document: %s", in)
+}
+
+func extractVersion(endpointURL string) (int, int, error) {
+	u, err := url.Parse(endpointURL)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	parts := strings.Split(strings.TrimRight(u.Path, "/"), "/")
+	if len(parts) == 0 {
+		return 0, 0, fmt.Errorf("expected path with version, got: %s", u.Path)
+	}
+
+	// first, check the nth path element for a version string
+	if majorVersion, minorVersion, err := ParseVersion(parts[len(parts)-1]); err == nil {
+		return majorVersion, minorVersion, nil
+	}
+
+	// if there are no more parts, quit
+	if len(parts) == 1 {
+		// we don't return the error message directly since it might be misleading: at this point
+		// we might have a *malformed* version identifier rather than *no* version identifier
+		return 0, 0, fmt.Errorf("failed to infer version from path: %s", u.Path)
+	}
+
+	// the guidelines say we should use the currently scoped project_id from the token, but we
+	// don't necessarily have a token yet so we speculatively look at the (n-1)th path element
+	// (but only that) just as keystoneauth does
+	//
+	// https://github.com/openstack/keystoneauth/blob/master/keystoneauth1/discover.py#L1534-L1545
+	if majorVersion, minorVersion, err := ParseVersion(parts[len(parts)-2]); err == nil {
+		return majorVersion, minorVersion, err
+	}
+
+	// once again, we don't return the error message directly
+	return 0, 0, fmt.Errorf("failed to infer version from path: %s", u.Path)
+}
+
+// GetServiceVersions returns the versions supported by the ServiceClient Endpoint.
+// If the endpoint resolves to an unversioned discovery API, this should return one or more supported versions.
+// If the endpoint resolves to a versioned discovery API, this should return exactly one supported version.
+func GetServiceVersions(ctx context.Context, client *gophercloud.ProviderClient, endpointURL string, discoverVersions bool) ([]SupportedVersion, error) {
+	var supportedVersions []SupportedVersion
+	var endpointVersion *SupportedVersion
+
+	if majorVersion, minorVersion, err := extractVersion(endpointURL); err == nil {
+		endpointVersion = &SupportedVersion{Major: majorVersion, Minor: minorVersion}
+		if !discoverVersions {
+			return append(supportedVersions, *endpointVersion), nil
+		}
+	}
+
+	var resp response
+	_, err := client.Request(ctx, "GET", endpointURL, &gophercloud.RequestOpts{
+		JSONResponse: &resp,
+		OkCodes:      []int{200, 300},
+	})
+	if err != nil {
+		// we weren't able to find a discovery document but we have version information from the URL
+		if endpointVersion != nil {
+			return append(supportedVersions, *endpointVersion), nil
+		}
+		return supportedVersions, err
+	}
+
+	versions := resp.Versions
+
+	for _, version := range versions {
+		majorVersion, minorVersion, err := ParseVersion(version.ID)
+		if err != nil {
+			return supportedVersions, err
+		}
+
+		status, err := ParseStatus(version.Status)
+		if err != nil {
+			return supportedVersions, err
+		}
+
+		supportedVersion := SupportedVersion{
+			Major:  majorVersion,
+			Minor:  minorVersion,
+			Status: status,
+		}
+
+		// Only normalize the microversions if there are microversions to normalize
+		if (version.Version != "" || version.MaxVersion != "") && version.MinVersion != "" {
+			supportedVersion.MinMajor, supportedVersion.MinMinor, err = ParseMicroversion(version.MinVersion)
+			if err != nil {
+				return supportedVersions, err
+			}
+
+			maxVersion := version.Version
+			if maxVersion == "" {
+				maxVersion = version.MaxVersion
+			}
+			supportedVersion.MaxMajor, supportedVersion.MaxMinor, err = ParseMicroversion(maxVersion)
+			if err != nil {
+				return supportedVersions, err
+			}
+		}
+
+		supportedVersions = append(supportedVersions, supportedVersion)
+	}
+
+	sort.Slice(supportedVersions, func(i, j int) bool {
+		return supportedVersions[i].Major > supportedVersions[j].Major || (supportedVersions[i].Major == supportedVersions[j].Major &&
+			supportedVersions[i].Minor > supportedVersions[j].Minor)
+	})
+
+	return supportedVersions, nil
+}
+
+// GetSupportedMicroversions returns the minimum and maximum microversion that is supported by the ServiceClient Endpoint.
+func GetSupportedMicroversions(ctx context.Context, client *gophercloud.ServiceClient) (SupportedMicroversions, error) {
+	var supportedMicroversions SupportedMicroversions
+
+	supportedVersions, err := GetServiceVersions(ctx, client.ProviderClient, client.Endpoint, true)
+	if err != nil {
+		return supportedMicroversions, err
+	}
+
+	// If there are multiple versions then we were handed an unversioned endpoint. These don't
+	// provide microversion information, so we need to fail. Likewise, if there are no versions
+	// then something has gone wrong and we also need to fail.
+	if len(supportedVersions) > 1 {
+		return supportedMicroversions, fmt.Errorf("unversioned endpoint with multiple alternatives not supported")
+	} else if len(supportedVersions) == 0 {
+		return supportedMicroversions, fmt.Errorf("microversions not supported by endpoint")
+	}
+
+	supportedMicroversions = supportedVersions[0].SupportedMicroversions
+
+	if supportedMicroversions.MaxMajor == 0 &&
+		supportedMicroversions.MaxMinor == 0 &&
+		supportedMicroversions.MinMajor == 0 &&
+		supportedMicroversions.MinMinor == 0 {
+		return supportedMicroversions, fmt.Errorf("microversions not supported by endpoint")
+	}
+
+	return supportedMicroversions, err
+}
+
+// RequireMicroversion checks that the required microversion is supported and
+// returns a ServiceClient with the microversion set.
+func RequireMicroversion(ctx context.Context, client gophercloud.ServiceClient, required string) (gophercloud.ServiceClient, error) {
+	supportedMicroversions, err := GetSupportedMicroversions(ctx, &client)
+	if err != nil {
+		return client, fmt.Errorf("unable to determine supported microversions: %w", err)
+	}
+	supported, err := supportedMicroversions.IsSupported(required)
+	if err != nil {
+		return client, err
+	}
+	if !supported {
+		return client, fmt.Errorf("microversion %s not supported. Supported versions: %v", required, supportedMicroversions)
+	}
+	client.Microversion = required
+	return client, nil
+}
+
+// IsSupported checks if a microversion falls in the supported interval.
+// It returns true if the version is within the interval and false otherwise.
+func (supported SupportedMicroversions) IsSupported(version string) (bool, error) {
+	// Parse the version X.Y into X and Y integers that are easier to compare.
+	vMajor, vMinor, err := ParseMicroversion(version)
+	if err != nil {
+		return false, err
+	}
+
+	// Check that the major version number is supported.
+	if (vMajor < supported.MinMajor) || (vMajor > supported.MaxMajor) {
+		return false, nil
+	}
+
+	// Check that the minor version number is supported
+	if (vMinor <= supported.MaxMinor) && (vMinor >= supported.MinMinor) {
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// ParseVersion parsed the version strings v{MAJOR} and v{MAJOR}.{MINOR} into separate integers
+// major and minor.
+// For example, "v2.1" becomes 2 and 1, "v3" becomes 3 and 0, and "1" becomes 1 and 0.
+func ParseVersion(version string) (major, minor int, err error) {
+	if version == "" {
+		return 0, 0, fmt.Errorf("empty version provided")
+	}
+
+	// We use the regex indicated by the version discovery guidelines.
+	//
+	// https://specs.openstack.org/openstack/api-sig/guidelines/consuming-catalog/version-discovery.html#inferring-version
+	//
+	// However, we diverge slightly since not all services include the 'v' prefix (glares at zaqar)
+	versionRe := regexp.MustCompile(`^v?(?P<major>[0-9]+)(\.(?P<minor>[0-9]+))?$`)
+
+	match := versionRe.FindStringSubmatch(version)
+	if len(match) == 0 {
+		return 0, 0, fmt.Errorf("invalid format: %q", version)
+	}
+
+	major, err = strconv.Atoi(match[versionRe.SubexpIndex("major")])
+	if err != nil {
+		return 0, 0, err
+	}
+
+	minor = 0
+	if match[versionRe.SubexpIndex("minor")] != "" {
+		minor, err = strconv.Atoi(match[versionRe.SubexpIndex("minor")])
+		if err != nil {
+			return 0, 0, err
+		}
+	}
+
+	return major, minor, nil
+}
+
+// ParseMicroversion parses the version major.minor into separate integers major and minor.
+// For example, "2.53" becomes 2 and 53.
+func ParseMicroversion(version string) (major int, minor int, err error) {
+	parts := strings.Split(version, ".")
+	if len(parts) != 2 {
+		return 0, 0, fmt.Errorf("invalid microversion format: %q", version)
+	}
+	major, err = strconv.Atoi(parts[0])
+	if err != nil {
+		return 0, 0, err
+	}
+	minor, err = strconv.Atoi(parts[1])
+	if err != nil {
+		return 0, 0, err
+	}
+	return major, minor, nil
+}
+
+func ParseStatus(status string) (Status, error) {
+	switch strings.ToUpper(status) {
+	case "CURRENT", "STABLE": // keystone uses STABLE instead of CURRENT
+		return StatusCurrent, nil
+	case "SUPPORTED":
+		return StatusSupported, nil
+	case "DEPRECATED":
+		return StatusDeprecated, nil
+	case "":
+		return StatusUnknown, nil
+	default:
+		return StatusUnknown, fmt.Errorf("invalid status: %q", status)
+	}
+}
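A sketch of the exported helpers in isolation; no live cloud is needed for these two:

```go
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud/v2/openstack/utils"
)

func main() {
	// ParseVersion accepts v-prefixed and bare identifiers alike.
	for _, v := range []string{"v2.1", "v3", "1"} {
		major, minor, _ := utils.ParseVersion(v)
		fmt.Println(v, major, minor) // v2.1 2 1, v3 3 0, 1 1 0
	}

	// IsSupported checks a microversion against a discovered range.
	r := utils.SupportedMicroversions{MinMajor: 2, MinMinor: 1, MaxMajor: 2, MaxMinor: 79}
	ok, _ := r.IsSupported("2.53")
	fmt.Println(ok) // true
}
```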
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go
index 52fcd38ab3..9048e83def 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go
@@ -7,13 +7,14 @@ import (
 	"errors"
 	"io"
 	"net/http"
+	"slices"
 	"strings"
 	"sync"
 )
 
 // DefaultUserAgent is the default User-Agent string set in the request header.
 const (
-	DefaultUserAgent         = "gophercloud/v2.7.0"
+	DefaultUserAgent         = "gophercloud/v2.9.0"
 	DefaultMaxBackoffRetries = 60
 )
 
@@ -437,16 +438,8 @@ func (client *ProviderClient) doRequest(ctx context.Context, method, url string,
 		okc = defaultOkCodes(method)
 	}
 
-	// Validate the HTTP response status.
-	var ok bool
-	for _, code := range okc {
-		if resp.StatusCode == code {
-			ok = true
-			break
-		}
-	}
-
-	if !ok {
+	// Check the response code against the acceptable codes
+	if !slices.Contains(okc, resp.StatusCode) {
 		body, _ := io.ReadAll(resp.Body)
 		resp.Body.Close()
 		respErr := ErrUnexpectedResponseCode{
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/service_client.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/service_client.go
index c1f9f41d4d..015c3f2339 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/service_client.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/service_client.go
@@ -130,6 +130,9 @@ func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) {
 		opts.MoreHeaders["X-OpenStack-Ironic-API-Version"] = client.Microversion
 	case "baremetal-introspection":
 		opts.MoreHeaders["X-OpenStack-Ironic-Inspector-API-Version"] = client.Microversion
+	case "container-infrastructure-management", "container-infrastructure", "container-infra":
+		// magnum should accept container-infrastructure-management but (as of Epoxy) does not
+		serviceType = "container-infra"
 	}
 
 	if client.Type != "" {
diff --git a/openshift/vendor/github.com/gophercloud/gophercloud/v2/util.go b/openshift/vendor/github.com/gophercloud/gophercloud/v2/util.go
index ad8a7dfaaa..d11a723b1b 100644
--- a/openshift/vendor/github.com/gophercloud/gophercloud/v2/util.go
+++ b/openshift/vendor/github.com/gophercloud/gophercloud/v2/util.go
@@ -37,9 +37,6 @@ func NormalizePathURL(basePath, rawPath string) (string, error) {
 
 	absPathSys = filepath.Join(basePath, rawPath)
 	u.Path = filepath.ToSlash(absPathSys)
-	if err != nil {
-		return "", err
-	}
 	u.Scheme = "file"
 	return u.String(), nil
 }
diff --git a/openshift/vendor/github.com/hashicorp/go-version/LICENSE b/openshift/vendor/github.com/hashicorp/go-version/LICENSE
index 1409d6ab92..bb1e9a486a 100644
--- a/openshift/vendor/github.com/hashicorp/go-version/LICENSE
+++ b/openshift/vendor/github.com/hashicorp/go-version/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2014 HashiCorp, Inc.
+Copyright IBM Corp. 2014, 2025
 
 Mozilla Public License, version 2.0
 
diff --git a/openshift/vendor/github.com/hashicorp/go-version/README.md b/openshift/vendor/github.com/hashicorp/go-version/README.md
index 4b7806cd96..83a8249f72 100644
--- a/openshift/vendor/github.com/hashicorp/go-version/README.md
+++ b/openshift/vendor/github.com/hashicorp/go-version/README.md
@@ -1,6 +1,7 @@
 # Versioning Library for Go
+
 ![Build Status](https://github.com/hashicorp/go-version/actions/workflows/go-tests.yml/badge.svg)
-[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version)
+[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-version.svg)](https://pkg.go.dev/github.com/hashicorp/go-version)
 
 go-version is a library for parsing versions and version constraints,
 and verifying versions against a set of constraints. go-version
@@ -12,7 +13,7 @@ Versions used with go-version must follow [SemVer](http://semver.org/).
 ## Installation and Usage
 
 Package documentation can be found on
-[GoDoc](http://godoc.org/github.com/hashicorp/go-version).
+[Go Reference](https://pkg.go.dev/github.com/hashicorp/go-version).
 
 Installation can be done with a normal `go get`:
 
diff --git a/openshift/vendor/github.com/hashicorp/go-version/constraint.go b/openshift/vendor/github.com/hashicorp/go-version/constraint.go
index 29bdc4d2b5..3964da070d 100644
--- a/openshift/vendor/github.com/hashicorp/go-version/constraint.go
+++ b/openshift/vendor/github.com/hashicorp/go-version/constraint.go
@@ -1,4 +1,4 @@
-// Copyright (c) HashiCorp, Inc.
+// Copyright IBM Corp. 2014, 2025
 // SPDX-License-Identifier: MPL-2.0
 
 package version
@@ -8,8 +8,26 @@ import (
 	"regexp"
 	"sort"
 	"strings"
+	"sync"
 )
 
+var (
+	constraintRegexp     *regexp.Regexp
+	constraintRegexpOnce sync.Once
+)
+
+func getConstraintRegexp() *regexp.Regexp {
+	constraintRegexpOnce.Do(func() {
+		// This heavy lifting only happens the first time this function is called
+		constraintRegexp = regexp.MustCompile(fmt.Sprintf(
+			`^\s*(%s)\s*(%s)\s*$`,
+			`<=|>=|!=|~>|<|>|=|`,
+			VersionRegexpRaw,
+		))
+	})
+	return constraintRegexp
+}
+
 // Constraint represents a single constraint for a version, such as
 // ">= 1.0".
 type Constraint struct {
@@ -29,38 +47,11 @@ type Constraints []*Constraint
 
 type constraintFunc func(v, c *Version) bool
 
-var constraintOperators map[string]constraintOperation
-
 type constraintOperation struct {
 	op operator
 	f  constraintFunc
 }
 
-var constraintRegexp *regexp.Regexp
-
-func init() {
-	constraintOperators = map[string]constraintOperation{
-		"":   {op: equal, f: constraintEqual},
-		"=":  {op: equal, f: constraintEqual},
-		"!=": {op: notEqual, f: constraintNotEqual},
-		">":  {op: greaterThan, f: constraintGreaterThan},
-		"<":  {op: lessThan, f: constraintLessThan},
-		">=": {op: greaterThanEqual, f: constraintGreaterThanEqual},
-		"<=": {op: lessThanEqual, f: constraintLessThanEqual},
-		"~>": {op: pessimistic, f: constraintPessimistic},
-	}
-
-	ops := make([]string, 0, len(constraintOperators))
-	for k := range constraintOperators {
-		ops = append(ops, regexp.QuoteMeta(k))
-	}
-
-	constraintRegexp = regexp.MustCompile(fmt.Sprintf(
-		`^\s*(%s)\s*(%s)\s*$`,
-		strings.Join(ops, "|"),
-		VersionRegexpRaw))
-}
-
 // NewConstraint will parse one or more constraints from the given
 // constraint string. The string must be a comma-separated list of
 // constraints.
@@ -107,7 +98,7 @@ func (cs Constraints) Check(v *Version) bool {
 // to '>0.2' it is *NOT* treated as equal.
 //
 // Missing operator is treated as equal to '=', whitespaces
-// are ignored and constraints are sorted before comaparison.
+// are ignored and constraints are sorted before comparison.
 func (cs Constraints) Equals(c Constraints) bool {
 	if len(cs) != len(c) {
 		return false
@@ -176,9 +167,9 @@ func (c *Constraint) String() string {
 }
 
 func parseSingle(v string) (*Constraint, error) {
-	matches := constraintRegexp.FindStringSubmatch(v)
+	matches := getConstraintRegexp().FindStringSubmatch(v)
 	if matches == nil {
-		return nil, fmt.Errorf("Malformed constraint: %s", v)
+		return nil, fmt.Errorf("malformed constraint: %s", v)
 	}
 
 	check, err := NewVersion(matches[2])
@@ -186,7 +177,25 @@ func parseSingle(v string) (*Constraint, error) {
 		return nil, err
 	}
 
-	cop := constraintOperators[matches[1]]
+	var cop constraintOperation
+	switch matches[1] {
+	case "=":
+		cop = constraintOperation{op: equal, f: constraintEqual}
+	case "!=":
+		cop = constraintOperation{op: notEqual, f: constraintNotEqual}
+	case ">":
+		cop = constraintOperation{op: greaterThan, f: constraintGreaterThan}
+	case "<":
+		cop = constraintOperation{op: lessThan, f: constraintLessThan}
+	case ">=":
+		cop = constraintOperation{op: greaterThanEqual, f: constraintGreaterThanEqual}
+	case "<=":
+		cop = constraintOperation{op: lessThanEqual, f: constraintLessThanEqual}
+	case "~>":
+		cop = constraintOperation{op: pessimistic, f: constraintPessimistic}
+	default:
+		cop = constraintOperation{op: equal, f: constraintEqual}
+	}
 
 	return &Constraint{
 		f:        cop.f,
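The public API is unchanged by the lazy compilation; the first `NewConstraint` or `NewVersion` call now pays the regexp-compilation cost instead of package init. For example:

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// The first call compiles the constraint regexp via sync.Once.
	constraints, err := version.NewConstraint(">= 1.2, < 2.0")
	if err != nil {
		panic(err)
	}

	v, _ := version.NewVersion("1.5.3")
	fmt.Println(constraints.Check(v)) // true
}
```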
diff --git a/openshift/vendor/github.com/hashicorp/go-version/version.go b/openshift/vendor/github.com/hashicorp/go-version/version.go
index 7c683c2813..17b29732ee 100644
--- a/openshift/vendor/github.com/hashicorp/go-version/version.go
+++ b/openshift/vendor/github.com/hashicorp/go-version/version.go
@@ -1,23 +1,39 @@
-// Copyright (c) HashiCorp, Inc.
+// Copyright IBM Corp. 2014, 2025
 // SPDX-License-Identifier: MPL-2.0
 
 package version
 
 import (
-	"bytes"
 	"database/sql/driver"
 	"fmt"
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 )
 
 // The compiled regular expression used to test the validity of a version.
 var (
-	versionRegexp *regexp.Regexp
-	semverRegexp  *regexp.Regexp
+	versionRegexp     *regexp.Regexp
+	versionRegexpOnce sync.Once
+	semverRegexp      *regexp.Regexp
+	semverRegexpOnce  sync.Once
 )
 
+func getVersionRegexp() *regexp.Regexp {
+	versionRegexpOnce.Do(func() {
+		versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
+	})
+	return versionRegexp
+}
+
+func getSemverRegexp() *regexp.Regexp {
+	semverRegexpOnce.Do(func() {
+		semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
+	})
+	return semverRegexp
+}
+
 // The raw regular expression string used for testing the validity
 // of a version.
 const (
@@ -42,28 +58,23 @@ type Version struct {
 	original string
 }
 
-func init() {
-	versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
-	semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
-}
-
 // NewVersion parses the given version and returns a new
 // Version.
 func NewVersion(v string) (*Version, error) {
-	return newVersion(v, versionRegexp)
+	return newVersion(v, getVersionRegexp())
 }
 
 // NewSemver parses the given version and returns a new
 // Version that adheres strictly to SemVer specs
 // https://semver.org/
 func NewSemver(v string) (*Version, error) {
-	return newVersion(v, semverRegexp)
+	return newVersion(v, getSemverRegexp())
 }
 
 func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
 	matches := pattern.FindStringSubmatch(v)
 	if matches == nil {
-		return nil, fmt.Errorf("Malformed version: %s", v)
+		return nil, fmt.Errorf("malformed version: %s", v)
 	}
 	segmentsStr := strings.Split(matches[1], ".")
 	segments := make([]int64, len(segmentsStr))
@@ -71,7 +82,7 @@ func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
 		val, err := strconv.ParseInt(str, 10, 64)
 		if err != nil {
 			return nil, fmt.Errorf(
-				"Error parsing version: %s", err)
+				"error parsing version: %s", err)
 		}
 
 		segments[i] = val
@@ -174,7 +185,7 @@ func (v *Version) Compare(other *Version) int {
 		} else if lhs < rhs {
 			return -1
 		}
-		// Otherwis, rhs was > lhs, they're not equal
+		// Otherwise, rhs was > lhs, they're not equal
 		return 1
 	}
 
@@ -382,22 +393,29 @@ func (v *Version) Segments64() []int64 {
 // missing parts (1.0 => 1.0.0) will be made into a canonicalized form
 // as shown in the parenthesized examples.
 func (v *Version) String() string {
-	var buf bytes.Buffer
-	fmtParts := make([]string, len(v.segments))
+	return string(v.bytes())
+}
+
+func (v *Version) bytes() []byte {
+	var buf []byte
 	for i, s := range v.segments {
-		// We can ignore err here since we've pre-parsed the values in segments
-		str := strconv.FormatInt(s, 10)
-		fmtParts[i] = str
+		if i > 0 {
+			buf = append(buf, '.')
+		}
+		buf = strconv.AppendInt(buf, s, 10)
 	}
-	fmt.Fprintf(&buf, strings.Join(fmtParts, "."))
+
 	if v.pre != "" {
-		fmt.Fprintf(&buf, "-%s", v.pre)
+		buf = append(buf, '-')
+		buf = append(buf, v.pre...)
 	}
+
 	if v.metadata != "" {
-		fmt.Fprintf(&buf, "+%s", v.metadata)
+		buf = append(buf, '+')
+		buf = append(buf, v.metadata...)
 	}
 
-	return buf.String()
+	return buf
 }
 
 // Original returns the original parsed version as-is, including any
diff --git a/openshift/vendor/github.com/hashicorp/go-version/version_collection.go b/openshift/vendor/github.com/hashicorp/go-version/version_collection.go
index 83547fe13d..11bc8b1c56 100644
--- a/openshift/vendor/github.com/hashicorp/go-version/version_collection.go
+++ b/openshift/vendor/github.com/hashicorp/go-version/version_collection.go
@@ -1,4 +1,4 @@
-// Copyright (c) HashiCorp, Inc.
+// Copyright IBM Corp. 2014, 2025
 // SPDX-License-Identifier: MPL-2.0
 
 package version
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/openshift/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
index 69b15d1848..9d1bb914b6 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -1,3 +1,112 @@
+## 2.27.3
+
+### Fixes
+- report exit result in case of failure [1c9f356]
+- fix data race [ece19c8]
+
+## 2.27.2
+
+### Fixes
+- inline automaxprocs to simplify dependencies; this will be removed when Go 1.26 comes out [a69113a]
+
+### Maintenance
+- Fix syntax errors and typo [a99c6e0]
+- Fix paragraph position error [f993df5]
+
+## 2.27.1
+
+### Fixes
+- Fix Ginkgo Reporter slice-bounds panic [606c1cb]
+- Bug Fix: Add GinkgoTBWrapper.Attr() and GinkgoTBWrapper.Output() [a6463b3]
+
+## 2.27.0
+
+### Features
+
+#### Transforming Nodes during Tree Construction
+
+This release adds support for `NodeArgsTransformer`s that can be registered with `AddTreeConstructionNodeArgsTransformer`.
+
+These are called during the tree construction phase as nodes are constructed and can modify the node strings and decorators.  This enables frameworks built on top of Ginkgo to modify Ginkgo nodes and enforce conventions.
+
+Learn more [here](https://onsi.github.io/ginkgo/#advanced-transforming-node-arguments-during-tree-construction).
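A hedged sketch of what a registered transformer might look like (the `AddTreeConstructionNodeArgsTransformer` signature comes from this release; the "[team-a]" prefix and label are an invented convention, and a dot-import of the ginkgo package is assumed):

```go
unregister := AddTreeConstructionNodeArgsTransformer(
	func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error) {
		// Enforce a hypothetical convention: every container gets a team prefix and label.
		if nodeType.Is(types.NodeTypeContainer) {
			text = "[team-a] " + text
			args = append(args, Label("team-a"))
		}
		return text, args, nil
	})
// The returned function unregisters the transformer again.
defer unregister()
```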
+
+#### Spec Prioritization
+
+A new `SpecPriority(int)` decorator has been added.  Ginkgo will honor priority when ordering specs, ensuring that higher-priority specs start running before lower-priority specs.
+
+Learn more [here](https://onsi.github.io/ginkgo/#prioritizing-specs).
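For example (a hypothetical spec using the `SpecPriority(int)` decorator described above, assuming a dot-imported ginkgo):

```go
// Slow suites benefit from starting their longest specs first.
var _ = Describe("image builds", SpecPriority(100), func() {
	It("builds the large base image", func() { /* ... */ })
})

// The default priority is 0; negative priorities start later in the ordering.
var _ = It("quick smoke check", SpecPriority(-10), func() {})
```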
+
+### Maintenance
+- Bump rexml from 3.4.0 to 3.4.2 in /docs (#1595) [1333dae]
+- Bump github.com/gkampitakis/go-snaps from 0.5.14 to 0.5.15 (#1600) [17ae63e]
+
+## 2.26.0
+
+### Features
+
+Ginkgo can now generate json-formatted reports that are compatible with the `go test` json format.  Use `ginkgo --gojson-report=report.go.json`.  This is not intended to be a replacement for Ginkgo's native json format, which is more information-rich and better models Ginkgo's test structure semantics.
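A typical invocation might look like this (the flag name is taken from the entry above; the report path is arbitrary):

```
ginkgo --gojson-report=report.go.json ./...
```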
+
+## 2.25.3
+
+### Fixes
+
+- emit --github-output group only for progress report itself [f01aed1]
+
+## 2.25.2
+
+### Fixes
+- Add github output group for progress report content
+
+### Maintenance
+- Bump Gomega
+
+## 2.25.1
+
+### Fixes
+- fix(types): ignore nameless nodes on FullText() [10866d3]
+- chore: fix some CodeQL warnings [2e42cff]
+
+## 2.25.0
+
+### `AroundNode`
+
+This release introduces a new decorator to support more complex spec setup use cases.
+
+`AroundNode` registers a function that runs before each individual node.  This is considered a more advanced decorator.
+
+Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information and some examples.
+
+Allowed signatures:
+
+- `AroundNode(func())` - `func` will be called before the node is run.
+- `AroundNode(func(ctx context.Context) context.Context)` - `func` can wrap the passed in context and return a new one which will be passed on to the node.
+- `AroundNode(func(ctx context.Context, body func(ctx context.Context)))` - `ctx` is the context for the node and `body` is a function that must be called to run the node.  This gives you complete control over what runs before and after the node.
+
+Multiple `AroundNode` decorators can be applied to a single node and they will run in the order they are applied.
+
+Unlike setup nodes like `BeforeEach` and `DeferCleanup`, `AroundNode` is guaranteed to run in the same goroutine as the decorated node.  This is necessary when working with lower-level libraries that must run on a single thread (you can call `runtime.LockOSThread()` in the `AroundNode` to ensure that the node runs on a single thread).
+
+Since `AroundNode` allows you to modify the context, you can also use `AroundNode` to implement shared setup that attaches values to the context.
+
+If applied to a container, `AroundNode` will run before every node in the container, including setup nodes like `BeforeEach` and `DeferCleanup`.
+
+`AroundNode` can also be applied to `RunSpecs` to run before every node in the suite.  This opens up new mechanisms for instrumenting individual nodes across an entire suite.
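A sketch of the thread-pinning use case called out above (hypothetical spec; `AroundNode` as described in this entry, with `runtime` and `context` from the standard library and a dot-imported ginkgo):

```go
var _ = It("drives a thread-bound C library", AroundNode(func(ctx context.Context, body func(ctx context.Context)) {
	// Keep the entire node on one OS thread for the duration of the body.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	body(ctx)
}), func() {
	// ... assertions against the thread-bound library ...
})
```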
+
+## 2.24.0
+
+### Features
+
+Specs can now be decorated with (e.g.) `SemVerConstraint("2.1.0")` and `ginkgo --sem-ver-filter="2.1.1"` will only run constrained specs that match the requested version.  Learn more in the docs [here](https://onsi.github.io/ginkgo/#spec-semantic-version-filtering)!  Thanks to @Icarus9913 for the PR.
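For instance (a hypothetical spec; decorator and flag as described above):

```go
// Runs only when the requested version satisfies the constraint,
// e.g. `ginkgo --sem-ver-filter="2.1.1"`.
var _ = It("exercises the v2.1+ API", SemVerConstraint(">= 2.1.0"), func() { /* ... */ })
```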
+
+### Fixes
+
+- remove -o from run command [3f5d379].  fixes [#1582](https://github.com/onsi/ginkgo/issues/1582)
+
+### Maintenance
+
+Numerous dependency bumps and documentation fixes
+
 ## 2.23.4
 
 Prior to this release Ginkgo would compute the incorrect number of available CPUs when running with `-p` in a linux container.  Thanks to @emirot for the fix!
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/README.md b/openshift/vendor/github.com/onsi/ginkgo/v2/README.md
index e3d0c13cc6..7b7ab9e39c 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/README.md
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/README.md
@@ -113,3 +113,13 @@ Ginkgo is MIT-Licensed
 ## Contributing
 
 See [CONTRIBUTING.md](CONTRIBUTING.md)
+
+## Sponsors
+
+Sponsors commit to a [sponsorship](https://github.com/sponsors/onsi) for a year.  If you're an organization that makes use of Ginkgo, please consider becoming a sponsor!
+
+
+Browser testing via [sponsor logo links omitted]
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/openshift/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
index d027bdff93..7e165e4738 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
@@ -186,6 +186,20 @@ func GinkgoLabelFilter() string {
 	return suiteConfig.LabelFilter
 }
 
+/*
+GinkgoSemVerFilter() returns the semantic version filter configured for this suite via `--sem-ver-filter`.
+
+You can use this to manually check if a set of semantic version constraints would satisfy the filter via:
+
+	if (SemVerConstraint("> 2.6.0", "< 2.8.0").MatchesSemVerFilter(GinkgoSemVerFilter())) {
+		//...
+	}
+*/
+func GinkgoSemVerFilter() string {
+	suiteConfig, _ := GinkgoConfiguration()
+	return suiteConfig.SemVerFilter
+}
+
 /*
 PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
 when running in parallel and output to stdout/stderr is being intercepted. You generally
@@ -254,7 +268,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool {
 	}
 	defer global.PopClone()
 
-	suiteLabels := extractSuiteConfiguration(args)
+	suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args)
 
 	var reporter reporters.Reporter
 	if suiteConfig.ParallelTotal == 1 {
@@ -297,7 +311,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool {
 	suitePath, err = filepath.Abs(suitePath)
 	exitIfErr(err)
 
-	passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+	passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
 
 	outputInterceptor.Shutdown()
 	flagSet.ValidateDeprecations(deprecationTracker)
@@ -316,8 +330,10 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool {
 	return passed
 }
 
-func extractSuiteConfiguration(args []any) Labels {
+func extractSuiteConfiguration(args []any) (Labels, SemVerConstraints, types.AroundNodes) {
 	suiteLabels := Labels{}
+	suiteSemVerConstraints := SemVerConstraints{}
+	aroundNodes := types.AroundNodes{}
 	configErrors := []error{}
 	for _, arg := range args {
 		switch arg := arg.(type) {
@@ -327,6 +343,10 @@ func extractSuiteConfiguration(args []any) Labels {
 			reporterConfig = arg
 		case Labels:
 			suiteLabels = append(suiteLabels, arg...)
+		case SemVerConstraints:
+			suiteSemVerConstraints = append(suiteSemVerConstraints, arg...)
+		case types.AroundNodeDecorator:
+			aroundNodes = append(aroundNodes, arg)
 		default:
 			configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg))
 		}
@@ -342,7 +362,7 @@ func extractSuiteConfiguration(args []any) Labels {
 		os.Exit(1)
 	}
 
-	return suiteLabels
+	return suiteLabels, suiteSemVerConstraints, aroundNodes
 }
 
 func getwd() (string, error) {
@@ -365,7 +385,7 @@ func PreviewSpecs(description string, args ...any) Report {
 	}
 	defer global.PopClone()
 
-	suiteLabels := extractSuiteConfiguration(args)
+	suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args)
 	priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess
 	suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1
 	defer func() {
@@ -383,7 +403,7 @@ func PreviewSpecs(description string, args ...any) Report {
 	suitePath, err = filepath.Abs(suitePath)
 	exitIfErr(err)
 
-	global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+	global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
 
 	return global.Suite.GetPreviewReport()
 }
@@ -481,6 +501,38 @@ func pushNode(node internal.Node, errors []error) bool {
 	return true
 }
 
+// NodeArgsTransformer is a hook which is called by the test construction DSL methods
+// before creating the new node. If it returns any error, the test suite
+// prints those errors and exits. The text and arguments can be modified,
+// which includes directly changing the args slice that is passed in.
+// Arguments have been flattened already, i.e. none of the entries in args is another []any.
+// The result may be nested.
+//
+// The node type is provided for information and remains the same.
+//
+// The offset is valid for calling NewLocation directly in the
+// implementation of TransformNodeArgs to find the location where
+// the Ginkgo DSL function is called. An additional offset supplied
+// by the caller via args is already included.
+//
+// A NodeArgsTransformer can be registered with AddTreeConstructionNodeArgsTransformer.
+type NodeArgsTransformer func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error)
+
+// AddTreeConstructionNodeArgsTransformer registers a NodeArgsTransformer.
+// Only nodes which get created after registering a NodeArgsTransformer
+// are transformed by it. The returned function can be called to
+// unregister the transformer.
+//
+// Both may only be called during the construction phase.
+//
+// If there is more than one registered transformer, then the most
+// recently added ones get called first.
+func AddTreeConstructionNodeArgsTransformer(transformer NodeArgsTransformer) func() {
+	// This conversion could be avoided with a type alias, but type aliases make
+	// developer documentation less useful.
+	return internal.AddTreeConstructionNodeArgsTransformer(internal.NodeArgsTransformer(transformer))
+}
+
 /*
 Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can
 contain any number of Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It).
@@ -492,7 +544,7 @@ You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-conta
 In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
 */
 func Describe(text string, args ...any) bool {
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...)))
 }
 
 /*
@@ -500,7 +552,7 @@ FDescribe focuses specs within the Describe block.
 */
 func FDescribe(text string, args ...any) bool {
 	args = append(args, internal.Focus)
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...)))
 }
 
 /*
@@ -508,7 +560,7 @@ PDescribe marks specs within the Describe block as pending.
 */
 func PDescribe(text string, args ...any) bool {
 	args = append(args, internal.Pending)
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...)))
 }
 
 /*
@@ -521,21 +573,21 @@ var XDescribe = PDescribe
 /* Context is an alias for Describe - it generates the exact same kind of Container node */
 var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe
 
-/* When is an alias for Describe - it generates the exact same kind of Container node */
+/* When is an alias for Describe - it generates the exact same kind of Container node with "when " as prefix for the text. */
 func When(text string, args ...any) bool {
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...)))
 }
 
-/* When is an alias for Describe - it generates the exact same kind of Container node */
+/* When is an alias for Describe - it generates the exact same kind of Container node with "when " as prefix for the text. */
 func FWhen(text string, args ...any) bool {
 	args = append(args, internal.Focus)
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...)))
 }
 
 /* When is an alias for Describe - it generates the exact same kind of Container node */
 func PWhen(text string, args ...any) bool {
 	args = append(args, internal.Pending)
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...)))
 }
 
 var XWhen = PWhen
@@ -551,7 +603,7 @@ You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it
 In addition, subject nodes can be decorated with a variety of decorators.
 You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
 */
 func It(text string, args ...any) bool {
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...)))
 }
 
 /*
@@ -559,7 +611,7 @@ FIt allows you to focus an individual It.
 */
 func FIt(text string, args ...any) bool {
 	args = append(args, internal.Focus)
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...)))
 }
 
 /*
@@ -567,7 +619,7 @@ PIt allows you to mark an individual It as pending.
 */
 func PIt(text string, args ...any) bool {
 	args = append(args, internal.Pending)
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...)))
 }
 
 /*
@@ -614,7 +666,7 @@ You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-
 func BeforeSuite(body any, args ...any) bool {
 	combinedArgs := []any{body}
 	combinedArgs = append(combinedArgs, args...)
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...)))
 }
 
 /*
@@ -633,7 +685,7 @@ You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-
 func AfterSuite(body any, args ...any) bool {
 	combinedArgs := []any{body}
 	combinedArgs = append(combinedArgs, args...)
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...)))
 }
 
 /*
@@ -671,7 +723,7 @@ func SynchronizedBeforeSuite(process1Body any, allProcessBody any, args ...any)
 	combinedArgs := []any{process1Body, allProcessBody}
 	combinedArgs = append(combinedArgs, args...)
 
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...)))
 }
 
 /*
@@ -691,7 +743,7 @@ func SynchronizedAfterSuite(allProcessBody any, process1Body any, args ...any) b
 	combinedArgs := []any{allProcessBody, process1Body}
 	combinedArgs = append(combinedArgs, args...)
 
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...)))
 }
 
 /*
@@ -704,7 +756,7 @@ You cannot nest any other Ginkgo nodes within a BeforeEach node's closure.
 You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach
 */
 func BeforeEach(args ...any) bool {
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeEach, "", args...)))
 }
 
 /*
@@ -717,7 +769,7 @@ You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure.
 You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach
 */
 func JustBeforeEach(args ...any) bool {
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeJustBeforeEach, "", args...)))
 }
 
 /*
@@ -732,7 +784,7 @@ You cannot nest any other Ginkgo nodes within an AfterEach node's closure.
 You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup
 */
 func AfterEach(args ...any) bool {
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterEach, "", args...)))
 }
 
 /*
@@ -744,7 +796,7 @@ You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure.
 You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach
 */
 func JustAfterEach(args ...any) bool {
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeJustAfterEach, "", args...)))
 }
 
 /*
@@ -759,7 +811,7 @@ You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#o
 And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
 */
 func BeforeAll(args ...any) bool {
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeAll, "", args...)))
 }
 
 /*
@@ -776,7 +828,7 @@ You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#o
 And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
 */
 func AfterAll(args ...any) bool {
-	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...))
+	return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterAll, "", args...)))
 }
 
 /*
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/openshift/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
index c65af4ce1c..e331d7cf8c 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
@@ -2,6 +2,7 @@ package ginkgo
 
 import (
 	"github.com/onsi/ginkgo/v2/internal"
+	"github.com/onsi/ginkgo/v2/types"
 )
 
 /*
@@ -99,6 +100,23 @@ You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
 */
 type Labels = internal.Labels
 
+/*
+SemVerConstraint decorates specs with SemVerConstraints. Multiple semantic version constraints can be passed to SemVerConstraint and these strings must follow the semantic version constraint rules.
+SemVerConstraints can be applied to container and subject nodes, but not setup nodes. You can provide multiple SemVerConstraints to a given node and a spec's semantic version constraints is the union of all semantic version constraints in its node hierarchy.
+
+You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func SemVerConstraint(semVerConstraints ...string) SemVerConstraints {
+	return SemVerConstraints(semVerConstraints)
+}
+
+/*
+SemVerConstraints are the type for spec SemVerConstraint decorators. Use SemVerConstraint(...) to construct SemVerConstraints.
+You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering
+*/
+type SemVerConstraints = internal.SemVerConstraints
+
 /*
 PollProgressAfter allows you to override the configured value for --poll-progress-after
 for a particular node.
@@ -136,8 +154,40 @@ Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will pro
 */
 type GracePeriod = internal.GracePeriod
 
+/*
+SpecPriority allows you to assign a priority to a spec or container.
+
+Specs with higher priority will be scheduled to run before specs with lower priority. The default priority is 0 and negative priorities are allowed.
+*/
+type SpecPriority = internal.SpecPriority
+
 /*
 SuppressProgressReporting is a decorator that allows you to disable progress reporting of a particular node.
 
 This is useful if `ginkgo -v -progress` is generating too much noise; particularly if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports.
 */
 const SuppressProgressReporting = internal.SuppressProgressReporting
+
+/*
+AroundNode registers a function that runs before each individual node. This is considered a more advanced decorator.
+
+Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information.
+
+Allowed signatures:
+
+- AroundNode(func()) - func will be called before the node is run.
+- AroundNode(func(ctx context.Context) context.Context) - func can wrap the passed in context and return a new one which will be passed on to the node.
+- AroundNode(func(ctx context.Context, body func(ctx context.Context))) - ctx is the context for the node and body is a function that must be called to run the node. This gives you complete control over what runs before and after the node.
+
+Multiple AroundNode decorators can be applied to a single node and they will run in the order they are applied.
+
+Unlike setup nodes like BeforeEach and DeferCleanup, AroundNode is guaranteed to run in the same goroutine as the decorated node. This is necessary when working with lower-level libraries that must run on a single thread (you can call runtime.LockOSThread() in the AroundNode to ensure that the node runs on a single thread).
+
+Since AroundNode allows you to modify the context you can also use AroundNode to implement shared setup that attaches values to the context. You must return a context that inherits from the passed in context.
+
+If applied to a container, AroundNode will run before every node in the container. Including setup nodes like BeforeEach and DeferCleanup.
+
+AroundNode can also be applied to RunSpecs to run before every node in the suite.
+*/
+func AroundNode[F types.AroundNodeAllowedFuncs](f F) types.AroundNodeDecorator {
+	return types.AroundNode(f, types.NewCodeLocation(1))
+}
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go
new file mode 100644
index 0000000000..ee6ac7b5f3
--- /dev/null
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go
@@ -0,0 +1,8 @@
+//go:build !go1.25
+// +build !go1.25
+
+package main
+
+import (
+	_ "github.com/onsi/ginkgo/v2/ginkgo/automaxprocs"
+)
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md
new file mode 100644
index 0000000000..e249ebe8b3
--- /dev/null
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md
@@ -0,0 +1,3 @@
+This entire directory is a lightly modified clone of https://github.com/uber-go/automaxprocs
+
+It will be removed when Go 1.26 ships and we no longer need to support Go 1.24 (which does not correctly autodetect maxprocs in containers).
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go
new file mode 100644
index 0000000000..8a762b51d6
--- /dev/null
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to
+// match the configured Linux CPU quota. Unlike the top-level automaxprocs
+// package, it lets the caller configure logging and handle errors.
+package automaxprocs
+
+import (
+	"os"
+	"runtime"
+)
+
+func init() {
+	Set()
+}
+
+const _maxProcsKey = "GOMAXPROCS"
+
+type config struct {
+	procs          func(int, func(v float64) int) (int, CPUQuotaStatus, error)
+	minGOMAXPROCS  int
+	roundQuotaFunc func(v float64) int
+}
+
+// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning
+// any error encountered and an undo function.
+//
+// Set is a no-op on non-Linux systems and in Linux environments without a
+// configured CPU quota.
+func Set() error {
+	cfg := &config{
+		procs:          CPUQuotaToGOMAXPROCS,
+		roundQuotaFunc: DefaultRoundFunc,
+		minGOMAXPROCS:  1,
+	}
+
+	// Honor the GOMAXPROCS environment variable if present. Otherwise, amend
+	// `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is
+	// Linux, and guarantee a minimum value of 1. The minimum guaranteed value
+	// can be overridden using `maxprocs.Min()`.
+	if _, exists := os.LookupEnv(_maxProcsKey); exists {
+		return nil
+	}
+	maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc)
+	if err != nil {
+		return err
+	}
+	if status == CPUQuotaUndefined {
+		return nil
+	}
+	runtime.GOMAXPROCS(maxProcs)
+	return nil
+}
diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go
similarity index 99%
rename from hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go
rename to openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go
index fe4ecf561e..a4676933e8 100644
--- a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go
@@ -21,7 +21,7 @@
 //go:build linux
 // +build linux
 
-package cgroups
+package automaxprocs
 
 import (
 	"bufio"
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go
similarity index 99%
rename from vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go
rename to openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go
index e89f543602..ed384891ef 100644
--- a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go
@@ -21,7 +21,7 @@
 //go:build linux
 // +build linux
 
-package cgroups
+package automaxprocs
 
 const (
 	// _cgroupFSType is the Linux CGroup file system type used in
diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go
similarity index 99%
rename from hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go
rename to openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go
index 78556062fe..69a0be6b71 100644
--- a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go
@@ -21,7 +21,7 @@
 //go:build linux
 // +build linux
 
-package cgroups
+package automaxprocs
 
 import (
 	"bufio"
diff --git a/openshift/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go
similarity index 91%
rename from openshift/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
rename to openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go
index f9057fd273..2d83343bd9 100644
--- a/openshift/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go
@@ -21,12 +21,10 @@
 //go:build linux
 // +build linux
 
-package runtime
+package automaxprocs
 
 import (
 	"errors"
-
-	cg "go.uber.org/automaxprocs/internal/cgroups"
 )
 
 // CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
@@ -58,8 +56,8 @@ type queryer interface {
 }
 
 var (
-	_newCgroups2 = cg.NewCGroups2ForCurrentProcess
-	_newCgroups  = cg.NewCGroupsForCurrentProcess
+	_newCgroups2 = NewCGroups2ForCurrentProcess
+	_newCgroups  = NewCGroupsForCurrentProcess
 	_newQueryer  = newQueryer
 )
@@ -68,7 +66,7 @@ func newQueryer() (queryer, error) {
 	if err == nil {
 		return cgroups, nil
 	}
-	if errors.Is(err, cg.ErrNotV2) {
+	if errors.Is(err, ErrNotV2) {
 		return _newCgroups()
 	}
 	return nil, err
diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go
similarity index 98%
rename from hack/tools/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
rename to openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go
index e74701508e..d2d61e8941 100644
--- a/hack/tools/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go
@@ -21,7 +21,7 @@
 //go:build !linux
 // +build !linux
 
-package runtime
+package automaxprocs
 
 // CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
 // to a valid GOMAXPROCS value. This is Linux-specific and not supported in the
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go
similarity index 98%
rename from vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go
rename to openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go
index 94ac75a46e..2e235d7d65 100644
--- a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go
@@ -21,7 +21,7 @@
 //go:build linux
 // +build linux
 
-package cgroups
+package automaxprocs
 
 import "fmt"
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go
similarity index 99%
rename from vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go
rename to openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go
index f3877f78aa..7c3fa306ef 100644
--- a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go
@@ -21,7 +21,7 @@
 //go:build linux
 // +build linux
 
-package cgroups
+package automaxprocs
 
 import (
 	"bufio"
diff --git a/openshift/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go
similarity index 98%
rename from openshift/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
rename to openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go
index f8a2834ac0..b8ec7e502a 100644
--- a/openshift/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go
@@ -18,7 +18,7 @@
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 // THE SOFTWARE.
 
-package runtime
+package automaxprocs
 
 import "math"
diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go
similarity index 99%
rename from hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go
rename to openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go
index cddc3eaec3..881ebd5902 100644
--- a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go
@@ -21,7 +21,7 @@
 //go:build linux
 // +build linux
 
-package cgroups
+package automaxprocs
 
 import (
 	"bufio"
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
index 2b36b2feb9..3021dfec2e 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
@@ -29,7 +29,6 @@ func BuildBuildCommand() command.Command {
 			var errors []error
 			cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
 			command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
-
 			buildSpecs(args, cliConfig, goFlagsConfig)
 		},
 	}
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
index 8e16d2bb03..f3439a3f0c 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
@@ -90,6 +90,9 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC
 	if reporterConfig.JSONReport != "" {
 		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports})
 	}
+	if reporterConfig.GoJSONReport != "" {
+		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.GoJSONReport, GenerateFunc: reporters.GenerateGoTestJSONReport, MergeFunc: reporters.MergeAndCleanupGoTestJSONReports})
+	}
 	if reporterConfig.JUnitReport != "" {
 		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports})
 	}
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
index 41052ea19d..48c69a1d83 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
@@ -9,6 +9,7 @@ import (
 	"path/filepath"
 	"regexp"
 	"strings"
+	"sync/atomic"
 	"syscall"
 	"time"
 
@@ -107,6 +108,9 @@ func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig t
 	if reporterConfig.JSONReport != "" {
 		reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
 	}
+	if reporterConfig.GoJSONReport != "" {
+		reporterConfig.GoJSONReport = AbsPathForGeneratedAsset(reporterConfig.GoJSONReport, suite, cliConfig, 0)
+	}
 	if reporterConfig.JUnitReport != "" {
 		reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
 	}
@@ -156,12 +160,15 @@ func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig t
 func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
 	type procResult struct {
+		proc                 int
+		exitResult           string
 		passed               bool
 		hasProgrammaticFocus bool
 	}
 
 	numProcs := cliConfig.ComputedProcs()
 	procOutput := make([]*bytes.Buffer, numProcs)
+	procExitResult := make([]string, numProcs)
 	coverProfiles := []string{}
 	blockProfiles := []string{}
@@ -179,6 +186,9 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig
 	if reporterConfig.JSONReport != "" {
 		reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
 	}
+	if reporterConfig.GoJSONReport != "" {
+		reporterConfig.GoJSONReport = AbsPathForGeneratedAsset(reporterConfig.GoJSONReport, suite, cliConfig, 0)
+	}
 	if reporterConfig.JUnitReport != "" {
 		reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
 	}
@@ -218,16 +228,20 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig
 		args = append(args, additionalArgs...)
 
 		cmd, buf := buildAndStartCommand(suite, args, false)
+		var exited atomic.Bool
 		procOutput[proc-1] = buf
-		server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() })
+		server.RegisterAlive(proc, func() bool { return !exited.Load() })
 
 		go func() {
 			cmd.Wait()
 			exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
 			procResults <- procResult{
+				proc:                 proc,
+				exitResult:           cmd.ProcessState.String(),
 				passed:               (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE),
 				hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE,
 			}
+			exited.Store(true)
 		}()
 	}
 
@@ -236,6 +250,7 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig
 		result := <-procResults
 		passed = passed && result.passed
 		suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus
+		procExitResult[result.proc-1] = result.exitResult
 	}
 	if passed {
 		suite.State = TestSuiteStatePassed
@@ -255,6 +270,8 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig
 		for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
 			fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc))
 			fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String()))
+			fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Exit result of proc %d:{{/}}\n", proc))
+			fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s\n", procExitResult[proc-1]))
 		}
 		fmt.Fprintf(os.Stderr, "** End **")
 	}
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
index bd6b8fbff3..419589b48c 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
@@ -3,7 +3,6 @@ package main
 import (
 	"fmt"
 	"os"
-	_ "go.uber.org/automaxprocs"
 
 	"github.com/onsi/ginkgo/v2/ginkgo/build"
 	"github.com/onsi/ginkgo/v2/ginkgo/command"
 	"github.com/onsi/ginkgo/v2/ginkgo/generators"
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
index a34d94354d..75cbdb4962 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
@@ -2,12 +2,9 @@ package watch
 
 import (
 	"go/build"
-	"regexp"
+	"strings"
 )
 
-var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
-var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing
-
 type Dependencies struct {
 	deps map[string]int
 }
@@ -78,7 +75,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) {
 		if err != nil {
 			continue
 		}
-		if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) {
+		if !pkg.Goroot && (!matchesGinkgoOrGomega(pkg.Dir) || matchesGinkgoIntegration(pkg.Dir)) {
 			d.addDepIfNotPresent(pkg.Dir, depth)
 		}
 	}
@@ -90,3 +87,11 @@ func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
 		d.deps[dep] = depth
 	}
 }
+
+func matchesGinkgoOrGomega(s string) bool {
+	return strings.Contains(s, "github.com/onsi/ginkgo") || strings.Contains(s, "github.com/onsi/gomega")
+}
+
+func matchesGinkgoIntegration(s string) bool {
+	return strings.Contains(s, "github.com/onsi/ginkgo/integration") // allow us to integration test this thing
+}
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
index 993279de29..40d1e1ab5c 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
@@ -2,6 +2,7 @@ package ginkgo
 
 import (
 	"context"
+	"io"
 	"testing"
 
 	"github.com/onsi/ginkgo/v2/internal/testingtproxy"
@@ -69,6 +70,8 @@ type GinkgoTInterface interface {
 	Skipf(format string, args ...any)
 	Skipped() bool
 	TempDir() string
+	Attr(key, value string)
+	Output() io.Writer
 }
 
 /*
@@ -187,3 +190,9 @@ func (g *GinkgoTBWrapper) Skipped() bool {
 func (g *GinkgoTBWrapper) TempDir() string {
 	return g.GinkgoT.TempDir()
 }
+func (g *GinkgoTBWrapper) Attr(key, value string) {
+	g.GinkgoT.Attr(key, value)
+}
+func (g *GinkgoTBWrapper) Output() io.Writer {
+	return g.GinkgoT.Output()
+}
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go
new file mode 100644
index 0000000000..c965710205
--- /dev/null
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go
@@ -0,0 +1,34 @@
+package internal
+
+import (
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+func ComputeAroundNodes(specs Specs) Specs {
+	out := Specs{}
+	for _, spec := range specs {
+		nodes := Nodes{}
+		currentNestingLevel := 0
+		aroundNodes := types.AroundNodes{}
+		nestingLevelIndices := []int{}
+		for _, node := range spec.Nodes {
+			switch node.NodeType {
+			case types.NodeTypeContainer:
+				currentNestingLevel = node.NestingLevel + 1
+				nestingLevelIndices = append(nestingLevelIndices, len(aroundNodes))
+				aroundNodes = aroundNodes.Append(node.AroundNodes...)
+				nodes = append(nodes, node)
+			default:
+				if currentNestingLevel > node.NestingLevel {
+					currentNestingLevel = node.NestingLevel
+					aroundNodes = aroundNodes[:nestingLevelIndices[currentNestingLevel]]
+				}
+				node.AroundNodes = types.AroundNodes{}.Append(aroundNodes...).Append(node.AroundNodes...)
+				nodes = append(nodes, node)
+			}
+		}
+		spec.Nodes = nodes
+		out = append(out, spec)
+	}
+	return out
+}
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
index e3da7d14dd..a39daf5a60 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
@@ -56,7 +56,7 @@ This function sets the `Skip` property on specs by applying Ginkgo's focus polic
 
 *Note:* specs with pending nodes are Skipped when created by NewSpec.
 */
-func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) {
+func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteConfig types.SuiteConfig) (Specs, bool) {
 	focusString := strings.Join(suiteConfig.FocusStrings, "|")
 	skipString := strings.Join(suiteConfig.SkipStrings, "|")
 
@@ -84,6 +84,13 @@ func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suit
 		})
 	}
 
+	if suiteConfig.SemVerFilter != "" {
+		semVerFilter, _ := types.ParseSemVerFilter(suiteConfig.SemVerFilter)
+		skipChecks = append(skipChecks, func(spec Spec) bool {
+			return !semVerFilter(UnionOfSemVerConstraints(suiteSemVerConstraints, spec.Nodes.UnionOfSemVerConstraints()))
+		})
+	}
+
 	if len(suiteConfig.FocusFiles) > 0 {
 		focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles)
 		skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) })
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/group.go
index 02c9fe4fcd..cc794903e7 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/group.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/group.go
@@ -110,21 +110,53 @@ func newGroup(suite *Suite) *group {
 	}
 }
 
+// initialReportForSpec constructs a new SpecReport right before running the spec.
 func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
 	return types.SpecReport{
-		ContainerHierarchyTexts:     spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
-		ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
-		ContainerHierarchyLabels:    spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
-		LeafNodeLocation:            spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
-		LeafNodeType:                types.NodeTypeIt,
-		LeafNodeText:                spec.FirstNodeWithType(types.NodeTypeIt).Text,
-		LeafNodeLabels:              []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
-		ParallelProcess:             g.suite.config.ParallelProcess,
-		RunningInParallel:           g.suite.isRunningInParallel(),
-		IsSerial:                    spec.Nodes.HasNodeMarkedSerial(),
-		IsInOrderedContainer:        !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
-		MaxFlakeAttempts:            spec.Nodes.GetMaxFlakeAttempts(),
-		MaxMustPassRepeatedly:       spec.Nodes.GetMaxMustPassRepeatedly(),
+		ContainerHierarchyTexts:             spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
+		ContainerHierarchyLocations:         spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
+		ContainerHierarchyLabels:            spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
+		ContainerHierarchySemVerConstraints: spec.Nodes.WithType(types.NodeTypeContainer).SemVerConstraints(),
+		LeafNodeLocation:                    spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
+		LeafNodeType:                        types.NodeTypeIt,
+		LeafNodeText:                        spec.FirstNodeWithType(types.NodeTypeIt).Text,
+		LeafNodeLabels:                      []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
+		LeafNodeSemVerConstraints:           []string(spec.FirstNodeWithType(types.NodeTypeIt).SemVerConstraints),
+		ParallelProcess:                     g.suite.config.ParallelProcess,
+		RunningInParallel:                   g.suite.isRunningInParallel(),
+		IsSerial:                            spec.Nodes.HasNodeMarkedSerial(),
+		IsInOrderedContainer:                !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
+		MaxFlakeAttempts:                    spec.Nodes.GetMaxFlakeAttempts(),
+		MaxMustPassRepeatedly:               spec.Nodes.GetMaxMustPassRepeatedly(),
+		SpecPriority:                        spec.Nodes.GetSpecPriority(),
+	}
+}
+
+// constructionNodeReportForTreeNode constructs a new SpecReport right before invoking the body
+// of a container node during construction of the full tree.
+func constructionNodeReportForTreeNode(node *TreeNode) *types.ConstructionNodeReport {
+	var report types.ConstructionNodeReport
+	// Walk up the tree and set attributes accordingly.
+	addNodeToReportForNode(&report, node)
+	return &report
+}
+
+// addNodeToReportForNode is conceptually similar to initialReportForSpec and therefore placed here
+// although it doesn't do anything with a group.
+func addNodeToReportForNode(report *types.ConstructionNodeReport, node *TreeNode) {
+	if node.Parent != nil {
+		// First add the parent node, then the current one.
+		addNodeToReportForNode(report, node.Parent)
+	}
+	report.ContainerHierarchyTexts = append(report.ContainerHierarchyTexts, node.Node.Text)
+	report.ContainerHierarchyLocations = append(report.ContainerHierarchyLocations, node.Node.CodeLocation)
+	report.ContainerHierarchyLabels = append(report.ContainerHierarchyLabels, node.Node.Labels)
+	report.ContainerHierarchySemVerConstraints = append(report.ContainerHierarchySemVerConstraints, node.Node.SemVerConstraints)
+	if node.Node.MarkedSerial {
+		report.IsSerial = true
+	}
+	if node.Node.MarkedOrdered {
+		report.IsInOrderedContainer = true
 	}
 }
diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/node.go
index 8096950b6c..2bccec2dbf 100644
--- a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/node.go
+++ b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/node.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"reflect"
+	"slices"
 	"sort"
 	"sync"
 	"time"
@@ -46,20 +47,24 @@ type Node struct {
 	ReportEachBody  func(SpecContext, types.SpecReport)
 	ReportSuiteBody func(SpecContext, types.Report)
 
-	MarkedFocus             bool
-	MarkedPending           bool
-	MarkedSerial            bool
-	MarkedOrdered           bool
-	MarkedContinueOnFailure bool
-	MarkedOncePerOrdered    bool
-	FlakeAttempts           int
-	MustPassRepeatedly      int
-	Labels                  Labels
-	PollProgressAfter       time.Duration
-	PollProgressInterval    time.Duration
-	NodeTimeout             time.Duration
-	SpecTimeout             time.Duration
-	GracePeriod             time.Duration
+	MarkedFocus                  bool
+	MarkedPending                bool
+	MarkedSerial                 bool
+	MarkedOrdered                bool
+	MarkedContinueOnFailure      bool
+	MarkedOncePerOrdered         bool
+	FlakeAttempts                int
+	MustPassRepeatedly           int
+	Labels                       Labels
+	SemVerConstraints            SemVerConstraints
+	PollProgressAfter            time.Duration
+	PollProgressInterval         time.Duration
+	NodeTimeout                  time.Duration
+	SpecTimeout                  time.Duration
+	GracePeriod                  time.Duration
+	AroundNodes                  types.AroundNodes
+	HasExplicitlySetSpecPriority bool
+	SpecPriority                 int
 
 	NodeIDWhereCleanupWasGenerated uint
 }
@@ -85,31 +90,47 @@ type FlakeAttempts uint
 type MustPassRepeatedly uint
 type Offset uint
 type Done chan<- any // Deprecated Done Channel for asynchronous testing
-type Labels []string
 type PollProgressInterval time.Duration
 type PollProgressAfter time.Duration
 type NodeTimeout time.Duration
 type SpecTimeout time.Duration
 type GracePeriod time.Duration
+type SpecPriority int
+
+type Labels []string
 
 func (l Labels) MatchesLabelFilter(query string) bool {
 	return types.MustParseLabelFilter(query)(l)
 }
 
-func UnionOfLabels(labels ...Labels) Labels {
-	out := Labels{}
-	seen := map[string]bool{}
-	for _, labelSet := range labels {
-		for _, label := range labelSet {
-			if !seen[label] {
-				seen[label] = true
-				out = append(out, label)
+type SemVerConstraints []string
+
+func (svc SemVerConstraints) MatchesSemVerFilter(version string) bool {
+	return types.MustParseSemVerFilter(version)(svc)
+}
+
+func unionOf[S ~[]E, E comparable](slices ...S) S {
+	out := S{}
+	seen := map[E]bool{}
+	for _, slice := range slices {
+		for _, item := range slice {
+			if !seen[item] {
+				seen[item] = true
+				out = append(out, item)
 			}
 		}
 	}
 	return out
 }
 
+func UnionOfLabels(labels ...Labels) Labels {
+	return unionOf(labels...)
+}
+
+func UnionOfSemVerConstraints(semVerConstraints ...SemVerConstraints) SemVerConstraints {
+	return unionOf(semVerConstraints...)
+}
+
 func PartitionDecorations(args ...any) ([]any, []any) {
 	decorations := []any{}
 	remainingArgs := []any{}
@@ -151,6 +172,8 @@ func isDecoration(arg any) bool {
 		return true
 	case t == reflect.TypeOf(Labels{}):
 		return true
+	case t == reflect.TypeOf(SemVerConstraints{}):
+		return true
 	case t == reflect.TypeOf(PollProgressInterval(0)):
 		return true
 	case t == reflect.TypeOf(PollProgressAfter(0)):
@@ -161,6 +184,10 @@ func isDecoration(arg any) bool {
 		return true
 	case t == reflect.TypeOf(GracePeriod(0)):
 		return true
+	case t == reflect.TypeOf(types.AroundNodeDecorator{}):
+		return true
+	case t == reflect.TypeOf(SpecPriority(0)):
+		return true
 	case t.Kind() == reflect.Slice && isSliceOfDecorations(arg):
 		return true
 	default:
@@ -191,6 +218,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 		NodeType:          nodeType,
 		Text:              text,
 		Labels:            Labels{},
+		SemVerConstraints: SemVerConstraints{},
 		CodeLocation:      types.NewCodeLocation(baseOffset),
 		NestingLevel:      -1,
 		PollProgressAfter: -1,
@@ -205,7 +233,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 		}
 	}
 
-	args = unrollInterfaceSlice(args)
+	args = UnrollInterfaceSlice(args)
 
 	remainingArgs := []any{}
 	// First get the CodeLocation up-to-date
@@ -221,6 +249,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 	}
 
 	labelsSeen := map[string]bool{}
+	semVerConstraintsSeen := map[string]bool{}
 	trackedFunctionError := false
 	args = remainingArgs
 	remainingArgs = []any{}
@@ -299,6 +328,14 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 			if nodeType.Is(types.NodeTypeContainer) {
 				appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod"))
 			}
+		case t == reflect.TypeOf(SpecPriority(0)):
+			if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+				appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecPriority"))
+			}
+			node.SpecPriority = int(arg.(SpecPriority))
+			node.HasExplicitlySetSpecPriority = true
+		case t == reflect.TypeOf(types.AroundNodeDecorator{}):
+			node.AroundNodes = append(node.AroundNodes, arg.(types.AroundNodeDecorator))
 		case t == reflect.TypeOf(Labels{}):
 			if !nodeType.Is(types.NodeTypesForContainerAndIt) {
 				appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label"))
@@ -311,6 +348,18 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 					appendError(err)
 				}
 			}
+		case t == reflect.TypeOf(SemVerConstraints{}):
+			if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+				appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SemVerConstraint"))
+			}
+			for _, semVerConstraint := range arg.(SemVerConstraints) {
+				if !semVerConstraintsSeen[semVerConstraint] {
+					semVerConstraintsSeen[semVerConstraint] = true
+					semVerConstraint, err := types.ValidateAndCleanupSemVerConstraint(semVerConstraint, node.CodeLocation)
+					node.SemVerConstraints = append(node.SemVerConstraints, semVerConstraint)
+					appendError(err)
+				}
+			}
 		case t.Kind() == reflect.Func:
 			if nodeType.Is(types.NodeTypeContainer) {
 				if node.Body != nil {
@@ -599,7 +648,7 @@ func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(stri
 		})
 	}
 
-	return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...)
+ return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs) } func (n Node) IsZero() bool { @@ -824,6 +873,32 @@ func (n Nodes) UnionOfLabels() []string { return out } +func (n Nodes) SemVerConstraints() [][]string { + out := make([][]string, len(n)) + for i := range n { + if n[i].SemVerConstraints == nil { + out[i] = []string{} + } else { + out[i] = []string(n[i].SemVerConstraints) + } + } + return out +} + +func (n Nodes) UnionOfSemVerConstraints() []string { + out := []string{} + seen := map[string]bool{} + for i := range n { + for _, constraint := range n[i].SemVerConstraints { + if !seen[constraint] { + seen[constraint] = true + out = append(out, constraint) + } + } + } + return out +} + func (n Nodes) CodeLocations() []types.CodeLocation { out := make([]types.CodeLocation, len(n)) for i := range n { @@ -920,7 +995,16 @@ func (n Nodes) GetMaxMustPassRepeatedly() int { return maxMustPassRepeatedly } -func unrollInterfaceSlice(args any) []any { +func (n Nodes) GetSpecPriority() int { + for i := len(n) - 1; i >= 0; i-- { + if n[i].HasExplicitlySetSpecPriority { + return n[i].SpecPriority + } + } + return 0 +} + +func UnrollInterfaceSlice(args any) []any { v := reflect.ValueOf(args) if v.Kind() != reflect.Slice { return []any{args} @@ -928,11 +1012,67 @@ out := []any{} for i := 0; i < v.Len(); i++ { el := reflect.ValueOf(v.Index(i).Interface()) - if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) { - out = append(out, unrollInterfaceSlice(el.Interface())...) + if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) && el.Type() != reflect.TypeOf(SemVerConstraints{}) { + out = append(out, UnrollInterfaceSlice(el.Interface())...) } else { out = append(out, v.Index(i).Interface()) } } return out } + +type NodeArgsTransformer func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error) + +func AddTreeConstructionNodeArgsTransformer(transformer NodeArgsTransformer) func() { + id := nodeArgsTransformerCounter + nodeArgsTransformerCounter++ + nodeArgsTransformers = append(nodeArgsTransformers, registeredNodeArgsTransformer{id, transformer}) + return func() { + nodeArgsTransformers = slices.DeleteFunc(nodeArgsTransformers, func(transformer registeredNodeArgsTransformer) bool { + return transformer.id == id + }) + } +} + +var ( + nodeArgsTransformerCounter int64 + nodeArgsTransformers []registeredNodeArgsTransformer +) + +type registeredNodeArgsTransformer struct { + id int64 + transformer NodeArgsTransformer +} + +// TransformNewNodeArgs is the helper for DSL functions which handles NodeArgsTransformers. +// +// Its return values are intentionally the same as the internal.NewNode parameters, +// which makes it possible to chain the invocations: +// +// NewNode(TransformNewNodeArgs(...)) +func TransformNewNodeArgs(exitIfErrors func([]error), deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...any) (*types.DeprecationTracker, types.NodeType, string, []any) { + var errs []error + + // Most recent first... + // + // This intentionally doesn't use slices.Backward because + // using iterators influences stack unwinding. + for i := len(nodeArgsTransformers) - 1; i >= 0; i-- { + transformer := nodeArgsTransformers[i].transformer + args = UnrollInterfaceSlice(args) + + // We do not really need to recompute this on additional loop iterations, + // but it's fast and simpler this way.
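+ // The scan below keeps the last user-supplied Offset decorator found in args; the fixed number of intervening stack frames is then added to it, so transformers report code locations relative to the user's call site.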
+ var offset Offset + for _, arg := range args { + if o, ok := arg.(Offset); ok { + offset = o + } + } + offset += 3 // The DSL function, this helper, and the TransformNewNodeArgs implementation. + + text, args, errs = transformer(nodeType, offset, text, args) + exitIfErrors(errs) + } + return deprecationTracker, nodeType, text, args +} diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go index 84eea0a59e..da58d54f95 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go @@ -125,7 +125,7 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, // pick out a representative spec representativeSpec := specs[executionGroups[groupID][0]] - // and grab the node on the spec that will represent which shufflable group this execution group belongs tu + // and grab the node on the spec that will represent which shufflable group this execution group belongs to shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle) //add the execution group to its shufflable group @@ -138,14 +138,35 @@ } } + // now, for each shufflable group, we compute the priority + shufflableGroupingIDPriorities := map[uint]int{} + for shufflableGroupingID, groupIDs := range shufflableGroupingIDToGroupIDs { + // the priority of a shufflable grouping is the max priority of any spec in any execution group in the shufflable grouping + maxPriority := -1 << 31 // min int32 + for _, groupID := range groupIDs { + for _, specIdx := range executionGroups[groupID] { + specPriority := specs[specIdx].Nodes.GetSpecPriority() + maxPriority = max(specPriority, maxPriority) + } + } + shufflableGroupingIDPriorities[shufflableGroupingID] = maxPriority + } + // now we permute the sorted shufflable grouping IDs and build the ordered Groups - orderedGroups := GroupedSpecIndices{} permutation := r.Perm(len(shufflableGroupingIDs)) - for _, j := range permutation { - //let's get the execution group IDs for this shufflable group: - executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]] - // and we'll add their associated specindices to the orderedGroups slice: - for _, executionGroupID := range executionGroupIDsForJ { + shuffledGroupingIds := make([]uint, len(shufflableGroupingIDs)) + for i, j := range permutation { + shuffledGroupingIds[i] = shufflableGroupingIDs[j] + } + // now, we need to stable sort the shuffledGroupingIds by priority (higher priority first) + sort.SliceStable(shuffledGroupingIds, func(i, j int) bool { + return shufflableGroupingIDPriorities[shuffledGroupingIds[i]] > shufflableGroupingIDPriorities[shuffledGroupingIds[j]] + }) + + // we can now take these prioritized, shuffled groupings and form the final set of ordered spec groups + orderedGroups := GroupedSpecIndices{} + for _, id := range shuffledGroupingIds { + for _, executionGroupID := range shufflableGroupingIDToGroupIDs[id] { orderedGroups = append(orderedGroups, executionGroups[executionGroupID]) } } diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go index 11269cf1f2..165cbc4b67 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go +++
b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go @@ -236,7 +236,7 @@ func extractRunningGoroutines() ([]types.Goroutine, error) { } functionCall.Filename = line[:delimiterIdx] line = strings.Split(line[delimiterIdx+1:], " ")[0] - lineNumber, err := strconv.ParseInt(line, 10, 64) + lineNumber, err := strconv.ParseInt(line, 10, 32) functionCall.Line = int(lineNumber) if err != nil { return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error())) diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go new file mode 100644 index 0000000000..8b7a9ceabf --- /dev/null +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go @@ -0,0 +1,158 @@ +package reporters + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/go/packages" +) + +func ptr[T any](in T) *T { + return &in +} + +type encoder interface { + Encode(v any) error +} + +// gojsonEvent matches the format from go internals +// https://github.com/golang/go/blob/master/src/cmd/internal/test2json/test2json.go#L31-L41 +// https://pkg.go.dev/cmd/test2json +type gojsonEvent struct { + Time *time.Time `json:",omitempty"` + Action GoJSONAction + Package string `json:",omitempty"` + Test string `json:",omitempty"` + Elapsed *float64 `json:",omitempty"` + Output *string `json:",omitempty"` + FailedBuild string `json:",omitempty"` +} + +type GoJSONAction string + +const ( + // start - the test binary is about to be executed + GoJSONStart GoJSONAction = "start" + // run - the test has started running + GoJSONRun GoJSONAction = "run" + // pause - the test has been paused + GoJSONPause GoJSONAction = "pause" + // cont - the test has continued running + GoJSONCont GoJSONAction = "cont" + // pass - the test passed + GoJSONPass GoJSONAction = "pass" + // bench - the benchmark printed log output but did not fail + GoJSONBench GoJSONAction = "bench" + // fail - the test or benchmark failed + GoJSONFail GoJSONAction = "fail" + // output - the test printed output + GoJSONOutput GoJSONAction = "output" + // skip - the test was skipped or the package contained no tests + GoJSONSkip GoJSONAction = "skip" +) + +func goJSONActionFromSpecState(state types.SpecState) GoJSONAction { + switch state { + case types.SpecStateInvalid: + return GoJSONFail + case types.SpecStatePending: + return GoJSONSkip + case types.SpecStateSkipped: + return GoJSONSkip + case types.SpecStatePassed: + return GoJSONPass + case types.SpecStateFailed: + return GoJSONFail + case types.SpecStateAborted: + return GoJSONFail + case types.SpecStatePanicked: + return GoJSONFail + case types.SpecStateInterrupted: + return GoJSONFail + case types.SpecStateTimedout: + return GoJSONFail + default: + panic("unexpected spec state") + } +} + +// gojsonReport wraps types.Report and calculates extra fields required by gojson +type gojsonReport struct { + o types.Report + // Extra calculated fields + goPkg string + elapsed float64 +} + +func newReport(in types.Report) *gojsonReport { + return &gojsonReport{ + o: in, + } +} + +func (r *gojsonReport) Fill() error { + // NOTE: could the types.Report include the go package name?
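+ // Until the report carries it directly, resolve the suite's directory to its Go package ID via golang.org/x/tools/go/packages so that emitted events carry the same Package field that go test -json produces.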
+ goPkg, err := suitePathToPkg(r.o.SuitePath) + if err != nil { + return err + } + r.goPkg = goPkg + r.elapsed = r.o.RunTime.Seconds() + return nil +} + +// gojsonSpecReport wraps types.SpecReport and calculates extra fields required by gojson +type gojsonSpecReport struct { + o types.SpecReport + // extra calculated fields + testName string + elapsed float64 + action GoJSONAction +} + +func newSpecReport(in types.SpecReport) *gojsonSpecReport { + return &gojsonSpecReport{ + o: in, + } +} + +func (sr *gojsonSpecReport) Fill() error { + sr.elapsed = sr.o.RunTime.Seconds() + sr.testName = createTestName(sr.o) + sr.action = goJSONActionFromSpecState(sr.o.State) + return nil +} + +func suitePathToPkg(dir string) (string, error) { + cfg := &packages.Config{ + Mode: packages.NeedFiles | packages.NeedSyntax, + } + pkgs, err := packages.Load(cfg, dir) + if err != nil { + return "", err + } + if len(pkgs) != 1 { + return "", errors.New("expected exactly one package for the suite directory") + } + return pkgs[0].ID, nil +} + +func createTestName(spec types.SpecReport) string { + name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } + name = strings.TrimSpace(name) + return name +} diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go new file mode 100644 index 0000000000..ec5311d069 --- /dev/null +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go @@ -0,0 +1,111 @@ +package reporters + +type GoJSONEventWriter struct { + enc encoder + specSystemErrFn specSystemExtractFn + specSystemOutFn specSystemExtractFn +} + +func NewGoJSONEventWriter(enc encoder, errFn specSystemExtractFn, outFn specSystemExtractFn) *GoJSONEventWriter { + return &GoJSONEventWriter{ + enc: enc, + specSystemErrFn: errFn, + specSystemOutFn: outFn, + } +} + +func (r *GoJSONEventWriter) writeEvent(e *gojsonEvent) error { + return r.enc.Encode(e) +} + +func (r *GoJSONEventWriter) WriteSuiteStart(report *gojsonReport) error { + e := &gojsonEvent{ + Time: &report.o.StartTime, + Action: GoJSONStart, + Package: report.goPkg, + Output: nil, + FailedBuild: "", + } + return r.writeEvent(e) +} + +func (r *GoJSONEventWriter) WriteSuiteResult(report *gojsonReport) error { + var action GoJSONAction + switch { + case report.o.PreRunStats.SpecsThatWillRun == 0: + action = GoJSONSkip + case report.o.SuiteSucceeded: + action = GoJSONPass + default: + action = GoJSONFail + } + e := &gojsonEvent{ + Time: &report.o.EndTime, + Action: action, + Package: report.goPkg, + Output: nil, + FailedBuild: "", + Elapsed: ptr(report.elapsed), + } + return r.writeEvent(e) +} + +func (r *GoJSONEventWriter) WriteSpecStart(report *gojsonReport, specReport *gojsonSpecReport) error { + e := &gojsonEvent{ + Time: &specReport.o.StartTime, + Action: GoJSONRun, + Test: specReport.testName, + Package: report.goPkg, + Output: nil, + FailedBuild: "", + } + return r.writeEvent(e) +} + +func (r *GoJSONEventWriter) WriteSpecOut(report *gojsonReport, specReport *gojsonSpecReport) error { + events := []*gojsonEvent{} + + stdErr := r.specSystemErrFn(specReport.o) + if stdErr != "" { + events = append(events, &gojsonEvent{ + Time:
&specReport.o.EndTime, + Action: GoJSONOutput, + Test: specReport.testName, + Package: report.goPkg, + Output: ptr(stdErr), + FailedBuild: "", + }) + } + stdOut := r.specSystemOutFn(specReport.o) + if stdOut != "" { + events = append(events, &gojsonEvent{ + Time: &specReport.o.EndTime, + Action: GoJSONOutput, + Test: specReport.testName, + Package: report.goPkg, + Output: ptr(stdOut), + FailedBuild: "", + }) + } + + for _, ev := range events { + err := r.writeEvent(ev) + if err != nil { + return err + } + } + return nil +} + +func (r *GoJSONEventWriter) WriteSpecResult(report *gojsonReport, specReport *gojsonSpecReport) error { + e := &gojsonEvent{ + Time: &specReport.o.EndTime, + Action: specReport.action, + Test: specReport.testName, + Package: report.goPkg, + Elapsed: ptr(specReport.elapsed), + Output: nil, + FailedBuild: "", + } + return r.writeEvent(e) +} diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go new file mode 100644 index 0000000000..633e49b88d --- /dev/null +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go @@ -0,0 +1,45 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +type GoJSONReporter struct { + ev *GoJSONEventWriter +} + +type specSystemExtractFn func(spec types.SpecReport) string + +func NewGoJSONReporter(enc encoder, errFn specSystemExtractFn, outFn specSystemExtractFn) *GoJSONReporter { + return &GoJSONReporter{ + ev: NewGoJSONEventWriter(enc, errFn, outFn), + } +} + +func (r *GoJSONReporter) Write(originalReport types.Report) error { + // suite start events + report := newReport(originalReport) + err := report.Fill() + if err != nil { + return err + } + r.ev.WriteSuiteStart(report) + for _, originalSpecReport := range originalReport.SpecReports { + specReport := newSpecReport(originalSpecReport) + err := specReport.Fill() + if err != nil { + return err + } + if specReport.o.LeafNodeType == types.NodeTypeIt { + // handle any It leaf node as a spec + r.ev.WriteSpecStart(report, specReport) + r.ev.WriteSpecOut(report, specReport) + r.ev.WriteSpecResult(report, specReport) + } else { + // handle any other leaf node as generic output + r.ev.WriteSpecOut(report, specReport) + } + } + r.ev.WriteSuiteResult(report) + return nil +} diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go index 2d2ea2fc35..99c9c5f5be 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -2,6 +2,7 @@ package internal import ( "context" + "reflect" "github.com/onsi/ginkgo/v2/types" ) @@ -11,6 +12,7 @@ type SpecContext interface { SpecReport() types.SpecReport AttachProgressReporter(func() string) func() + WrappedContext() context.Context } type specContext struct { @@ -45,3 +47,28 @@ func NewSpecContext(suite *Suite) *specContext { func (sc *specContext) SpecReport() types.SpecReport { return sc.suite.CurrentSpecReport() } + +func (sc *specContext) WrappedContext() context.Context { + return sc.Context +} + +/* +The user is allowed to wrap `SpecContext` in a new context.Context when using AroundNodes. But body functions expect SpecContext. +We support this by taking their context.Context and returning a SpecContext that wraps it.
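+ +As an illustration (the user code here is hypothetical): an AroundNode may hand the body a derived context, e.g. body(context.WithValue(ctx, key, value)); wrapContextChain then recovers the underlying *specContext via the "GINKGO_SPEC_CONTEXT" context value and returns a new specContext embedding the derived context.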
+*/ +func wrapContextChain(ctx context.Context) SpecContext { + if ctx == nil { + return nil + } + if reflect.TypeOf(ctx) == reflect.TypeOf(&specContext{}) { + return ctx.(*specContext) + } else if sc, ok := ctx.Value("GINKGO_SPEC_CONTEXT").(*specContext); ok { + return &specContext{ + Context: ctx, + ProgressReporterManager: sc.ProgressReporterManager, + cancel: sc.cancel, + suite: sc.suite, + } + } + return nil +} diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 3edf507765..ef76cd099e 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -32,6 +32,7 @@ type Suite struct { suiteNodes Nodes cleanupNodes Nodes + aroundNodes types.AroundNodes failer *Failer reporter reporters.Reporter @@ -41,6 +42,8 @@ type Suite struct { config types.SuiteConfig deadline time.Time + currentConstructionNodeReport *types.ConstructionNodeReport + skipAll bool report types.Report currentSpecReport types.SpecReport @@ -87,6 +90,7 @@ func (suite *Suite) Clone() (*Suite, error) { ProgressReporterManager: NewProgressReporterManager(), topLevelContainers: suite.topLevelContainers.Clone(), suiteNodes: suite.suiteNodes.Clone(), + aroundNodes: suite.aroundNodes.Clone(), selectiveLock: &sync.Mutex{}, }, nil } @@ -104,13 +108,14 @@ func (suite *Suite) BuildTree() error { return nil } -func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { +func (suite *Suite) Run(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteAroundNodes types.AroundNodes, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { if suite.phase != PhaseBuildTree { panic("cannot run before building the tree = call suite.BuildTree() first") } ApplyNestedFocusPolicyToTree(suite.tree) specs := GenerateSpecsFromTreeRoot(suite.tree) - specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig) + specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteSemVerConstraints, suiteConfig) + specs = ComputeAroundNodes(specs) suite.phase = PhaseRun suite.client = client @@ -120,6 +125,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string suite.outputInterceptor = outputInterceptor suite.interruptHandler = interruptHandler suite.config = suiteConfig + suite.aroundNodes = suiteAroundNodes if suite.config.Timeout > 0 { suite.deadline = time.Now().Add(suite.config.Timeout) @@ -127,7 +133,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal) - success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs) + success := suite.runSpecs(description, suiteLabels, suiteSemVerConstraints, suitePath, hasProgrammaticFocus, specs) cancelProgressHandler() @@ -199,6 
+205,14 @@ func (suite *Suite) PushNode(node Node) error { err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation) } }() + + // Ensure that code running in the body of the container node + // has access to information about the current container node(s). + suite.currentConstructionNodeReport = constructionNodeReportForTreeNode(suite.tree) + defer func() { + suite.currentConstructionNodeReport = nil + }() + node.Body(nil) return err }() @@ -259,6 +273,7 @@ func (suite *Suite) pushCleanupNode(node Node) error { node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID node.NestingLevel = suite.currentNode.NestingLevel + node.AroundNodes = types.AroundNodes{}.Append(suite.currentNode.AroundNodes...).Append(node.AroundNodes...) suite.selectiveLock.Lock() suite.cleanupNodes = append(suite.cleanupNodes, node) suite.selectiveLock.Unlock() @@ -327,6 +342,16 @@ func (suite *Suite) By(text string, callback ...func()) error { return nil } +func (suite *Suite) CurrentConstructionNodeReport() types.ConstructionNodeReport { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + report := suite.currentConstructionNodeReport + if report == nil { + panic("CurrentConstructionNodeReport may only be called during construction of the spec tree") + } + return *report +} + /* Spec Running methods - used during PhaseRun */ @@ -428,13 +453,14 @@ func (suite *Suite) processCurrentSpecReport() { } } -func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { +func (suite *Suite) runSpecs(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { numSpecsThatWillBeRun := specs.CountWithoutSkip() suite.report = types.Report{ SuitePath: suitePath, SuiteDescription: description, SuiteLabels: suiteLabels, + SuiteSemVerConstraints: suiteSemVerConstraints, SuiteConfig: suite.config, SuiteHasProgrammaticFocus: hasProgrammaticFocus, PreRunStats: types.PreRunStats{ @@ -891,7 +917,30 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ failureC <- failureFromRun }() - node.Body(sc) + aroundNodes := types.AroundNodes{}.Append(suite.aroundNodes...).Append(node.AroundNodes...) + if len(aroundNodes) > 0 { + i := 0 + var f func(context.Context) + f = func(c context.Context) { + sc := wrapContextChain(c) + if sc == nil { + suite.failer.Fail("An AroundNode failed to pass a valid Ginkgo SpecContext in. 
You must always pass in a context derived from the context passed to you.", aroundNodes[i].CodeLocation) + return + } + i++ + if i < len(aroundNodes) { + aroundNodes[i].Body(sc, f) + } else { + node.Body(sc) + } + } + aroundNodes[0].Body(sc, f) + if i != len(aroundNodes) { + suite.failer.Fail("An AroundNode failed to call the passed in function.", aroundNodes[i].CodeLocation) + } + } else { + node.Body(sc) + } finished = true }() diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index b4ecc7cb83..9806e315a6 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -229,3 +229,9 @@ func (t *ginkgoTestingTProxy) ParallelTotal() int { func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() { return t.attachProgressReporter(f) } +func (t *ginkgoTestingTProxy) Output() io.Writer { + return t.writer +} +func (t *ginkgoTestingTProxy) Attr(key, value string) { + t.addReportEntry(key, value, internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose) +} diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 74ad0768b7..026d9cf9b3 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -72,6 +72,9 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { if len(report.SuiteLabels) > 0 { r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) } + if len(report.SuiteSemVerConstraints) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteSemVerConstraints, ", "))) + } r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) if report.SuiteConfig.ParallelTotal > 1 { r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) @@ -87,6 +90,13 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { bannerWidth = len(labels) + 2 } } + if len(report.SuiteSemVerConstraints) > 0 { + semVerConstraints := strings.Join(report.SuiteSemVerConstraints, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", semVerConstraints)) + if len(semVerConstraints)+2 > bannerWidth { + bannerWidth = len(semVerConstraints) + 2 + } + } r.emitBlock(strings.Repeat("=", bannerWidth)) out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) @@ -371,13 +381,22 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim cursor := 0 for _, entry := range timeline { tl := entry.GetTimelineLocation() - if tl.Offset < len(gw) { - r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset])) - cursor = tl.Offset - } else if cursor < len(gw) { + + end := tl.Offset + if end > len(gw) { + end = len(gw) + } + if end < cursor { + end = cursor + } + if cursor < end && cursor <= len(gw) && end <= len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:end])) + cursor = end + } else if cursor < len(gw) && end == len(gw) { r.emit(r.fi(indent, "%s", gw[cursor:])) cursor = len(gw) } + switch x := entry.(type) { case types.Failure: if isVeryVerbose { @@ -394,7 +413,7 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim case types.ReportEntry: r.emitReportEntry(indent, x) case types.ProgressReport: - 
r.emitProgressReport(indent, false, x) + r.emitProgressReport(indent, false, isVeryVerbose, x) case types.SpecEvent: if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { r.emitSpecEvent(indent, x, isVeryVerbose) @@ -448,7 +467,7 @@ func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failur if !failure.ProgressReport.IsZero() { r.emitBlock("\n") - r.emitProgressReport(indent, false, failure.ProgressReport) + r.emitProgressReport(indent, false, false, failure.ProgressReport) } if failure.AdditionalFailure != nil && includeAdditionalFailure { @@ -464,11 +483,11 @@ func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) } shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) - r.emitProgressReport(1, shouldEmitGW, report) + r.emitProgressReport(1, shouldEmitGW, true, report) r.emitDelimiter(1) } -func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput, emitGroup bool, report types.ProgressReport) { if report.Message != "" { r.emitBlock(r.fi(indent, report.Message+"\n")) indent += 1 @@ -504,6 +523,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput indent -= 1 } + if r.conf.GithubOutput && emitGroup { + r.emitBlock(r.fi(indent, "::group::Progress Report")) + } + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { r.emit("\n") r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) @@ -550,6 +573,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) r.emitGoroutines(indent, otherGoroutines...) } + + if r.conf.GithubOutput && emitGroup { + r.emitBlock(r.fi(indent, "::endgroup::")) + } } func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { @@ -698,8 +725,8 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { } func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { - texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} - texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) + texts, locations, labels, semVerConstraints := []string{}, []types.CodeLocation{}, [][]string{}, [][]string{} + texts, locations, labels, semVerConstraints = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...), append(semVerConstraints, report.ContainerHierarchySemVerConstraints...) 
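+ // semVerConstraints mirrors the labels slice: one entry per hierarchy level, so each nesting level can render its own [constraint] annotation next to its labels.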
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) @@ -707,6 +734,7 @@ texts = append(texts, r.f(report.LeafNodeText)) } labels = append(labels, report.LeafNodeLabels) + semVerConstraints = append(semVerConstraints, report.LeafNodeSemVerConstraints) locations = append(locations, report.LeafNodeLocation) failureLocation := report.Failure.FailureNodeLocation @@ -720,6 +748,7 @@ texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...) locations = append([]types.CodeLocation{failureLocation}, locations...) labels = append([][]string{{}}, labels...) + semVerConstraints = append([][]string{{}}, semVerConstraints...) highlightIndex = 0 case types.FailureNodeInContainer: i := report.Failure.FailureNodeContainerIndex @@ -747,6 +776,9 @@ if len(labels[i]) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) } + if len(semVerConstraints[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(semVerConstraints[i], ", ")) + } out += "\n" out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) } @@ -770,6 +802,10 @@ if len(flattenedLabels) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) } + flattenedSemVerConstraints := report.SemVerConstraints() + if len(flattenedSemVerConstraints) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedSemVerConstraints, ", ")) + } out += "\n" if usePreciseFailureLocation { out += r.f("{{gray}}%s{{/}}", failureLocation) diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go b/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go new file mode 100644 index 0000000000..d02fb7a1ae --- /dev/null +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go @@ -0,0 +1,61 @@ +package reporters + +import ( + "encoding/json" + "fmt" + "os" + "path" + + "github.com/onsi/ginkgo/v2/internal/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +// GenerateGoTestJSONReport produces a JSON-formatted report in the test2json format used by `go test -json` +func GenerateGoTestJSONReport(report types.Report, destination string) error { + // walk report and generate test2json-compatible objects + // JSON-encode the objects into filename + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return err + } + f, err := os.Create(destination) + if err != nil { + return err + } + defer f.Close() + enc := json.NewEncoder(f) + r := reporters.NewGoJSONReporter( + enc, + systemErrForUnstructuredReporters, + systemOutForUnstructuredReporters, + ) + return r.Write(report) +} + +// MergeAndCleanupGoTestJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources. +// It skips over reports that cannot be read but reports on them via the returned messages []string +func MergeAndCleanupGoTestJSONReports(sources []string, destination string) ([]string, error) { + messages := []string{} + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return messages, err + } + f, err := os.Create(destination) + if err != nil { + return messages, err + } + defer f.Close()
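+ // Each source file is a stream of newline-delimited test2json events, so the merge below is a plain byte-level concatenation; each source is deleted once its contents have been copied.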
+ + for _, source := range sources { + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + _, err = f.Write(data) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not write to %s:\n%s", destination, err.Error())) + continue + } + os.Remove(source) + } + return messages, nil +} diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 562e0f62ba..828f893fb8 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -36,6 +36,9 @@ type JunitReportConfig struct { // Enable OmitSpecLabels to prevent labels from appearing in the spec name OmitSpecLabels bool + // Enable OmitSpecSemVerConstraints to prevent semantic version constraints from appearing in the spec name + OmitSpecSemVerConstraints bool + // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name OmitLeafNodeType bool @@ -169,9 +172,11 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, + {"SuiteSemVerConstraints", fmt.Sprintf("[%s]", strings.Join(report.SuiteSemVerConstraints, ","))}, {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)}, {"LabelFilter", report.SuiteConfig.LabelFilter}, + {"SemVerFilter", report.SuiteConfig.SemVerFilter}, {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, @@ -207,6 +212,10 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit owner = matches[1] } } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 && !config.OmitSpecSemVerConstraints { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } name = strings.TrimSpace(name) test := JUnitTestCase{ diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index e990ad82e1..55e1d1f4f7 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -38,9 +38,13 @@ func GenerateTeamcityReport(report types.Report, dst string) error { name := report.SuiteDescription labels := report.SuiteLabels + semVerConstraints := report.SuiteSemVerConstraints if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) for _, spec := range report.SpecReports { name := fmt.Sprintf("[%s]", spec.LeafNodeType) @@ -51,6 +55,10 @@ func GenerateTeamcityReport(report types.Report, dst string) error { if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } + semVerConstraints := spec.SemVerConstraints() + if 
len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } name = tcEscape(name) fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/openshift/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index 5bf2e62e90..4e86dba84d 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -27,6 +27,8 @@ CurrentSpecReport returns information about the current running spec. The returned object is a types.SpecReport which includes helper methods to make extracting information about the spec easier. +During construction of the test tree the result is empty. + You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec */ @@ -34,6 +36,31 @@ func CurrentSpecReport() SpecReport { return global.Suite.CurrentSpecReport() } +/* +ConstructionNodeReport describes the container nodes during construction of +the spec tree. It provides a subset of the information that is provided +by SpecReport at runtime. + +It is documented here: [types.ConstructionNodeReport] +*/ +type ConstructionNodeReport = types.ConstructionNodeReport + +/* +CurrentTreeConstructionNodeReport returns information about the current container nodes +that lead to the current path in the spec tree. +The returned object is a types.ConstructionNodeReport which includes helper methods +to make extracting information about the spec easier. + +May only be called during construction of the spec tree. It panics when +called while tests are running. Use CurrentSpecReport instead in that +phase. + +You can learn more about ConstructionNodeReport here: [types.ConstructionNodeReport] +*/ +func CurrentTreeConstructionNodeReport() ConstructionNodeReport { + return global.Suite.CurrentConstructionNodeReport() +} + /* ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter @@ -92,7 +119,7 @@ func ReportBeforeEach(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...))) } /* @@ -116,7 +143,7 @@ func ReportAfterEach(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))) } /* @@ -145,7 +172,7 @@ You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spe func ReportBeforeSuite(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))) } /* @@ -165,7 +192,7 @@ ReportAfterSuite nodes must be created at the top-level (i.e.
not nested in a Co When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across all parallel nodes -In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags. +In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, GoJSON, JUnit, and Teamcity formatted reports using the --json-report, --gojson-report, --junit-report, and --teamcity-report ginkgo CLI flags. You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically @@ -177,7 +204,7 @@ You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spe func ReportAfterSuite(text string, body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))) } func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) { @@ -188,6 +215,12 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error())) } } + if reporterConfig.GoJSONReport != "" { + err := reporters.GenerateGoTestJSONReport(report, reporterConfig.GoJSONReport) + if err != nil { + Fail(fmt.Sprintf("Failed to generate Go JSON report:\n%s", err.Error())) + } + } if reporterConfig.JUnitReport != "" { err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport) if err != nil { @@ -206,6 +239,9 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re if reporterConfig.JSONReport != "" { flags = append(flags, "--json-report") } + if reporterConfig.GoJSONReport != "" { + flags = append(flags, "--gojson-report") + } if reporterConfig.JUnitReport != "" { flags = append(flags, "--junit-report") } @@ -213,9 +249,11 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re flags = append(flags, "--teamcity-report") } pushNode(internal.NewNode( - deprecationTracker, types.NodeTypeReportAfterSuite, - fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")), - body, - types.NewCustomCodeLocation("autogenerated by Ginkgo"), + internal.TransformNewNodeArgs( + exitIfErrors, deprecationTracker, types.NodeTypeReportAfterSuite, + fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")), + body, + types.NewCustomCodeLocation("autogenerated by Ginkgo"), + ), )) } diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/openshift/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index b9e0ca9ef7..1031aa8554 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -309,11 +309,11 @@ func generateTable(description string, isSubtree bool, args ...any) { internalNodeType = types.NodeTypeContainer } - pushNode(internal.NewNode(deprecationTracker, internalNodeType, description, internalNodeArgs...)) + 
pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, internalNodeType, description, internalNodeArgs...))) } }) - pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...)) + pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...))) } func invokeFunction(function any, parameters []any) []reflect.Value { diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/types/around_node.go b/openshift/vendor/github.com/onsi/ginkgo/v2/types/around_node.go new file mode 100644 index 0000000000..a069e0623d --- /dev/null +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/types/around_node.go @@ -0,0 +1,56 @@ +package types + +import ( + "context" +) + +type AroundNodeAllowedFuncs interface { + ~func(context.Context, func(context.Context)) | ~func(context.Context) context.Context | ~func() +} +type AroundNodeFunc func(ctx context.Context, body func(ctx context.Context)) + +func AroundNode[F AroundNodeAllowedFuncs](f F, cl CodeLocation) AroundNodeDecorator { + if f == nil { + panic("AroundNode cannot be called with a nil function.") + } + var aroundNodeFunc func(context.Context, func(context.Context)) + switch x := any(f).(type) { + case func(context.Context, func(context.Context)): + aroundNodeFunc = x + case func(context.Context) context.Context: + aroundNodeFunc = func(ctx context.Context, body func(context.Context)) { + ctx = x(ctx) + body(ctx) + } + case func(): + aroundNodeFunc = func(ctx context.Context, body func(context.Context)) { + x() + body(ctx) + } + } + + return AroundNodeDecorator{ + Body: aroundNodeFunc, + CodeLocation: cl, + } +} + +type AroundNodeDecorator struct { + Body AroundNodeFunc + CodeLocation CodeLocation +} + +type AroundNodes []AroundNodeDecorator + +func (an AroundNodes) Clone() AroundNodes { + out := make(AroundNodes, len(an)) + copy(out, an) + return out +} + +func (an AroundNodes) Append(other ...AroundNodeDecorator) AroundNodes { + out := make(AroundNodes, len(an)+len(other)) + copy(out, an) + copy(out[len(an):], other) + return out +} diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/types/config.go b/openshift/vendor/github.com/onsi/ginkgo/v2/types/config.go index 2e827efe30..f847036046 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -24,6 +24,7 @@ type SuiteConfig struct { FocusFiles []string SkipFiles []string LabelFilter string + SemVerFilter string FailOnPending bool FailOnEmpty bool FailFast bool @@ -95,6 +96,7 @@ type ReporterConfig struct { ForceNewlines bool JSONReport string + GoJSONReport string JUnitReport string TeamcityReport string } @@ -111,7 +113,7 @@ func (rc ReporterConfig) Verbosity() VerbosityLevel { } func (rc ReporterConfig) WillGenerateReport() bool { - return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" + return rc.JSONReport != "" || rc.GoJSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" } func NewDefaultReporterConfig() ReporterConfig { @@ -308,6 +310,8 @@ var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g.
'(cat || dog) && !fruit'"}, + {KeyPath: "S.SemVerFilter", Name: "sem-ver-filter", SectionKey: "filter", UsageArgument: "version", + Usage: "If set, ginkgo will only run specs with semantic version constraints that are satisfied by the provided version. e.g. '2.1.0'"}, {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", @@ -356,6 +360,8 @@ var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, + {KeyPath: "R.GoJSONReport", Name: "gojson-report", UsageArgument: "filename.json", SectionKey: "output", + Usage: "If set, Ginkgo will generate a Go JSON-formatted test report at the specified location."}, {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure", Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."}, {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output", @@ -443,6 +449,13 @@ func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig Re } } + if suiteConfig.SemVerFilter != "" { + _, err := ParseSemVerFilter(suiteConfig.SemVerFilter) + if err != nil { + errors = append(errors, err) + } + } + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { case "", "dup", "swap", "none": default: @@ -573,6 +586,9 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, +} + +var GoBuildOFlags = GinkgoFlags{ {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", Usage: "output binary path (including name)."}, } @@ -673,7 +689,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( - GoBuildFlags, + GoBuildFlags.CopyAppend(GoBuildOFlags...), map[string]any{ "Go": &goFlagsConfig, }, @@ -763,6 +779,7 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { flags := GinkgoCLISharedFlags flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoBuildOFlags...) 
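+ // Note: -o is registered via the separate GoBuildOFlags set, presumably so that only commands which emit a binary (such as ginkgo build and the test-compile path above) expose it.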
bindings := map[string]any{ "C": cliConfig, diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/openshift/vendor/github.com/onsi/ginkgo/v2/types/errors.go index c2796b5490..59313238cf 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -432,6 +432,24 @@ func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { } } +func (g ginkgoErrors) InvalidSemVerConstraint(semVerConstraint, errMsg string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid SemVerConstraint", + Message: fmt.Sprintf("'%s' is an invalid SemVerConstraint: %s", semVerConstraint, errMsg), + CodeLocation: cl, + DocLink: "spec-semantic-version-filtering", + } +} + +func (g ginkgoErrors) InvalidEmptySemVerConstraint(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty SemVerConstraint", + Message: "SemVerConstraint cannot be empty", + CodeLocation: cl, + DocLink: "spec-semantic-version-filtering", + } +} + /* Table errors */ func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { return GinkgoError{ diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go b/openshift/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go new file mode 100644 index 0000000000..3fc2ed144b --- /dev/null +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go @@ -0,0 +1,60 @@ +package types + +import ( + "fmt" + + "github.com/Masterminds/semver/v3" +) + +type SemVerFilter func([]string) bool + +func MustParseSemVerFilter(input string) SemVerFilter { + filter, err := ParseSemVerFilter(input) + if err != nil { + panic(err) + } + return filter +} + +func ParseSemVerFilter(filterVersion string) (SemVerFilter, error) { + if filterVersion == "" { + return func(_ []string) bool { return true }, nil + } + + targetVersion, err := semver.NewVersion(filterVersion) + if err != nil { + return nil, fmt.Errorf("invalid filter version: %w", err) + } + + return func(constraints []string) bool { + // unconstrained specs always run + if len(constraints) == 0 { + return true + } + + for _, constraintStr := range constraints { + constraint, err := semver.NewConstraint(constraintStr) + if err != nil { + return false + } + + if !constraint.Check(targetVersion) { + return false + } + } + + return true + }, nil +} + +func ValidateAndCleanupSemVerConstraint(semVerConstraint string, cl CodeLocation) (string, error) { + if len(semVerConstraint) == 0 { + return "", GinkgoErrors.InvalidEmptySemVerConstraint(cl) + } + _, err := semver.NewConstraint(semVerConstraint) + if err != nil { + return "", GinkgoErrors.InvalidSemVerConstraint(semVerConstraint, err.Error(), cl) + } + + return semVerConstraint, nil +} diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/types/types.go b/openshift/vendor/github.com/onsi/ginkgo/v2/types/types.go index ddcbec1ba8..9981a0dd68 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "slices" "sort" "strings" "time" @@ -19,6 +20,57 @@ func init() { } } + +// ConstructionNodeReport captures information about the container nodes in scope while the spec tree is being constructed. +type ConstructionNodeReport struct { + // ContainerHierarchyTexts is a slice containing the text strings of + // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyTexts []string + + // ContainerHierarchyLocations is a slice containing the CodeLocations of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyLocations []CodeLocation + + // ContainerHierarchyLabels is a slice containing the labels of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchyLabels [][]string + + // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchySemVerConstraints [][]string + + // IsSerial captures whether any container has the Serial decorator + IsSerial bool + + // IsInOrderedContainer captures whether any container is an Ordered container + IsInOrderedContainer bool +} + +// FullText returns a concatenation of all the report.ContainerHierarchyTexts +func (report ConstructionNodeReport) FullText() string { + texts := []string{} + texts = append(texts, report.ContainerHierarchyTexts...) + texts = slices.DeleteFunc(texts, func(t string) bool { + return t == "" + }) + return strings.Join(texts, " ") +} + +// Labels returns a deduped set of all the spec's Labels. +func (report ConstructionNodeReport) Labels() []string { + out := []string{} + seen := map[string]bool{} + for _, labels := range report.ContainerHierarchyLabels { + for _, label := range labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + + return out +} + // Report captures information about a Ginkgo test run type Report struct { //SuitePath captures the absolute path to the test suite @@ -30,6 +82,9 @@ type Report struct { //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function SuiteLabels []string + //SuiteSemVerConstraints captures any semVerConstraints attached to the suite by the DSL's RunSpecs() function + SuiteSemVerConstraints []string + //SuiteSucceeded captures the success or failure status of the test run //If true, the test run is considered successful. //If false, the test run is considered unsuccessful @@ -129,13 +184,21 @@ type SpecReport struct { // all Describe/Context/When containers in this spec's hierarchy ContainerHierarchyLabels [][]string - // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text + // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchySemVerConstraints [][]string + + // LeafNodeType, LeafNodeLocation, LeafNodeLabels, LeafNodeSemVerConstraints and LeafNodeText capture the NodeType, CodeLocation, and text // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be // one of the NodeTypesForSuiteLevelNodes node types) - LeafNodeType NodeType - LeafNodeLocation CodeLocation - LeafNodeLabels []string - LeafNodeText string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeSemVerConstraints []string + LeafNodeText string + + // Captures the Spec Priority + SpecPriority int // State captures whether the spec has passed, failed, etc.
State SpecState @@ -198,48 +261,52 @@ type SpecReport struct { func (report SpecReport) MarshalJSON() ([]byte, error) { //All this to avoid emitting an empty Failure struct in the JSON out := struct { - ContainerHierarchyTexts []string - ContainerHierarchyLocations []CodeLocation - ContainerHierarchyLabels [][]string - LeafNodeType NodeType - LeafNodeLocation CodeLocation - LeafNodeLabels []string - LeafNodeText string - State SpecState - StartTime time.Time - EndTime time.Time - RunTime time.Duration - ParallelProcess int - Failure *Failure `json:",omitempty"` - NumAttempts int - MaxFlakeAttempts int - MaxMustPassRepeatedly int - CapturedGinkgoWriterOutput string `json:",omitempty"` - CapturedStdOutErr string `json:",omitempty"` - ReportEntries ReportEntries `json:",omitempty"` - ProgressReports []ProgressReport `json:",omitempty"` - AdditionalFailures []AdditionalFailure `json:",omitempty"` - SpecEvents SpecEvents `json:",omitempty"` + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + ContainerHierarchySemVerConstraints [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeSemVerConstraints []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + MaxFlakeAttempts int + MaxMustPassRepeatedly int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` }{ - ContainerHierarchyTexts: report.ContainerHierarchyTexts, - ContainerHierarchyLocations: report.ContainerHierarchyLocations, - ContainerHierarchyLabels: report.ContainerHierarchyLabels, - LeafNodeType: report.LeafNodeType, - LeafNodeLocation: report.LeafNodeLocation, - LeafNodeLabels: report.LeafNodeLabels, - LeafNodeText: report.LeafNodeText, - State: report.State, - StartTime: report.StartTime, - EndTime: report.EndTime, - RunTime: report.RunTime, - ParallelProcess: report.ParallelProcess, - Failure: nil, - ReportEntries: nil, - NumAttempts: report.NumAttempts, - MaxFlakeAttempts: report.MaxFlakeAttempts, - MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, - CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, - CapturedStdOutErr: report.CapturedStdOutErr, + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + ContainerHierarchySemVerConstraints: report.ContainerHierarchySemVerConstraints, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeSemVerConstraints: report.LeafNodeSemVerConstraints, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + MaxFlakeAttempts: report.MaxFlakeAttempts, + MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, } if !report.Failure.IsZero() { @@ -287,6 +354,9 @@ 
func (report SpecReport) FullText() string { if report.LeafNodeText != "" { texts = append(texts, report.LeafNodeText) } + texts = slices.DeleteFunc(texts, func(t string) bool { + return t == "" + }) return strings.Join(texts, " ") } @@ -312,6 +382,28 @@ func (report SpecReport) Labels() []string { return out } +// SemVerConstraints returns a deduped set of all the spec's SemVerConstraints. +func (report SpecReport) SemVerConstraints() []string { + out := []string{} + seen := map[string]bool{} + for _, semVerConstraints := range report.ContainerHierarchySemVerConstraints { + for _, semVerConstraint := range semVerConstraints { + if !seen[semVerConstraint] { + seen[semVerConstraint] = true + out = append(out, semVerConstraint) + } + } + } + for _, semVerConstraint := range report.LeafNodeSemVerConstraints { + if !seen[semVerConstraint] { + seen[semVerConstraint] = true + out = append(out, semVerConstraint) + } + } + + return out +} + // MatchesLabelFilter returns true if the spec satisfies the passed in label filter query func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { filter, err := ParseLabelFilter(query) @@ -321,6 +413,15 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { return filter(report.Labels()), nil } +// MatchesSemVerFilter returns true if the spec satisfies the passed in semantic version filter +func (report SpecReport) MatchesSemVerFilter(version string) (bool, error) { + filter, err := ParseSemVerFilter(version) + if err != nil { + return false, err + } + return filter(report.SemVerConstraints()), nil +} + // FileName() returns the name of the file containing the spec func (report SpecReport) FileName() string { return report.LeafNodeLocation.FileName diff --git a/openshift/vendor/github.com/onsi/ginkgo/v2/types/version.go b/openshift/vendor/github.com/onsi/ginkgo/v2/types/version.go index 158ac2fd89..2a50192871 100644 --- a/openshift/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/openshift/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.23.4" +const VERSION = "2.27.3" diff --git a/openshift/vendor/github.com/onsi/gomega/CHANGELOG.md b/openshift/vendor/github.com/onsi/gomega/CHANGELOG.md index 890d892228..b7d7309f3f 100644 --- a/openshift/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/openshift/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,32 @@ +## 1.38.2 + +- roll back to go 1.23.0 [c404969] + +## 1.38.1 + +### Fixes + +Numerous minor fixes and dependency bumps + +## 1.38.0 + +### Features +- gstruct handles extra unexported fields [4ee7ed0] + +### Fixes +- support [] in IgnoringTopFunction function signatures (#851) [36bbf72] + +### Maintenance +- Bump golang.org/x/net from 0.40.0 to 0.41.0 (#846) [529d408] +- Fix typo [acd1f55] +- Bump google.golang.org/protobuf from 1.36.5 to 1.36.6 (#835) [bae65a0] +- Bump nokogiri from 1.18.4 to 1.18.8 in /docs (#842) [8dda91f] +- Bump golang.org/x/net from 0.39.0 to 0.40.0 (#843) [212d812] +- Bump github.com/onsi/ginkgo/v2 from 2.23.3 to 2.23.4 (#839) [59bd7f9] +- Bump nokogiri from 1.18.1 to 1.18.4 in /docs (#834) [328c729] +- Bump uri from 1.0.2 to 1.0.3 in /docs (#826) [9a798a1] +- Bump golang.org/x/net from 0.37.0 to 0.39.0 (#841) [04a72c6] + ## 1.37.0 ### Features diff --git a/openshift/vendor/github.com/onsi/gomega/gomega_dsl.go b/openshift/vendor/github.com/onsi/gomega/gomega_dsl.go index a491a64be7..fdba34ee9d 100644 --- a/openshift/vendor/github.com/onsi/gomega/gomega_dsl.go +++ 
b/openshift/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.37.0" +const GOMEGA_VERSION = "1.38.2" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -178,7 +178,7 @@ func ensureDefaultGomegaIsConfigured() { // All subsequent arguments will be required to be nil/zero. // // This is convenient if you want to make an assertion on a method/function that returns -// a value and an error - a common patter in Go. +// a value and an error - a common pattern in Go. // // For example, given a function with signature: // diff --git a/openshift/vendor/github.com/onsi/gomega/internal/async_assertion.go b/openshift/vendor/github.com/onsi/gomega/internal/async_assertion.go index a3a646e4ad..4121505b62 100644 --- a/openshift/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/openshift/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -452,7 +452,7 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch } } else { var fgErr formattedGomegaError - if errors.As(actualErr, &fgErr) { + if errors.As(matcherErr, &fgErr) { message += fgErr.FormattedGomegaError() + "\n" } else { message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr) diff --git a/openshift/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/openshift/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 532fc37449..ce74eee4c7 100644 --- a/openshift/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/openshift/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -2,6 +2,7 @@ package matchers import ( "bytes" + "errors" "fmt" "github.com/google/go-cmp/cmp" @@ -32,7 +33,7 @@ func (matcher *BeComparableToMatcher) Match(actual any) (success bool, matchErr if err, ok := r.(error); ok { matchErr = err } else if errMsg, ok := r.(string); ok { - matchErr = fmt.Errorf(errMsg) + matchErr = errors.New(errMsg) } } }() diff --git a/openshift/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/openshift/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go index 95057c26cc..c3da9bd48b 100644 --- a/openshift/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go +++ b/openshift/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/onsi/gomega/format" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" ) type MatchYAMLMatcher struct { diff --git a/openshift/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/openshift/vendor/github.com/prometheus/client_golang/prometheus/desc.go index ad347113c0..2331b8b4f3 100644 --- a/openshift/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/openshift/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const help: help, variableLabels: variableLabels.compile(), } - if !model.IsValidMetricName(model.LabelValue(fqName)) { + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. 
+ if !model.NameValidationScheme.IsValidMetricName(fqName) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } diff --git a/openshift/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/openshift/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index 8b016355ad..7bac0da33d 100644 --- a/openshift/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/openshift/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -453,7 +453,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { } group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') { groups = append(groups, group) } return groups @@ -568,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) + _, err := fmt.Fprintf(buf, format, args...) return err } ws := func(s string) error { diff --git a/openshift/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/openshift/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index f7f97ef926..d273b6640e 100644 --- a/openshift/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/openshift/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -67,7 +67,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) } // Our current conversion moves to legacy naming, so use legacy validation. - valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) + valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/openshift/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/openshift/vendor/github.com/prometheus/client_golang/prometheus/labels.go index c21911f292..5fe8d3b4d2 100644 --- a/openshift/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/openshift/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error { } func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. + return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix) } diff --git a/openshift/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/openshift/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 592eec3e24..76e59f1288 100644 --- a/openshift/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/openshift/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -186,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error { case pb.Counter != nil: pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] case pb.Histogram != nil: + h := pb.Histogram for _, e := range m.exemplars { - // pb.Histogram.Bucket are sorted by UpperBound. 
- i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { - return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 || + len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) && + e.GetTimestamp() != nil { + h.Exemplars = append(h.Exemplars, e) + if len(h.Bucket) == 0 { + // Don't proceed to classic buckets if there are none. + continue + } + } + // h.Bucket are sorted by UpperBound. + i := sort.Search(len(h.Bucket), func(i int) bool { + return h.Bucket[i].GetUpperBound() >= e.GetValue() }) - if i < len(pb.Histogram.Bucket) { - pb.Histogram.Bucket[i].Exemplar = e + if i < len(h.Bucket) { + h.Bucket[i].Exemplar = e } else { // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. b := &dto.Bucket{ - CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), + CumulativeCount: proto.Uint64(h.GetSampleCount()), UpperBound: proto.Float64(math.Inf(1)), Exemplar: e, } - pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + h.Bucket = append(h.Bucket, b) } } default: @@ -227,6 +237,7 @@ type Exemplar struct { // Only last applicable exemplar is injected from the list. // For example for Counter it means last exemplar is injected. // For Histogram, it means last applicable exemplar for each bucket is injected. +// For a Native Histogram, all valid exemplars are injected. // // NewMetricWithExemplars works best with MustNewConstMetric and // MustNewConstHistogram, see example. diff --git a/openshift/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/openshift/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go index 0a61b98461..b32c95fa3f 100644 --- a/openshift/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go +++ b/openshift/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -25,9 +25,9 @@ import ( "golang.org/x/sys/unix" ) -// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// errNotImplemented is returned by stub functions that replace cgo functions, when cgo // isn't available. -var notImplementedErr = errors.New("not implemented") +var errNotImplemented = errors.New("not implemented") type memoryInfo struct { vsize uint64 // Virtual memory size in bytes @@ -101,7 +101,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if memInfo, err := getMemory(); err == nil { ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) - } else if !errors.Is(err, notImplementedErr) { + } else if !errors.Is(err, errNotImplemented) { // Don't report an error when support is not compiled in. 
c.reportError(ch, c.rss, err) c.reportError(ch, c.vsize, err) diff --git a/openshift/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/openshift/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go index 8ddb0995d6..378865129b 100644 --- a/openshift/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go +++ b/openshift/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go @@ -16,7 +16,7 @@ package prometheus func getMemory() (*memoryInfo, error) { - return nil, notImplementedErr + return nil, errNotImplemented } // describe returns all descriptions of the collector for Darwin. diff --git a/openshift/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/openshift/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go index 9f4b130bef..8074f70f5d 100644 --- a/openshift/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go +++ b/openshift/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go @@ -66,11 +66,11 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if netstat, err := p.Netstat(); err == nil { var inOctets, outOctets float64 - if netstat.IpExt.InOctets != nil { - inOctets = *netstat.IpExt.InOctets + if netstat.InOctets != nil { + inOctets = *netstat.InOctets } - if netstat.IpExt.OutOctets != nil { - outOctets = *netstat.IpExt.OutOctets + if netstat.OutOctets != nil { + outOctets = *netstat.OutOctets } ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) diff --git a/openshift/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/openshift/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index 356edb7868..9332b0249a 100644 --- a/openshift/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/openshift/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -392,7 +392,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool { func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { labels := prometheus.Labels{} - if !(code || method) { + if !code && !method { return labels } diff --git a/openshift/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/openshift/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 2c808eece0..487b466563 100644 --- a/openshift/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/openshift/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { return false } - return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) + return m.deleteByHashWithLabelValues(h, lvs, m.curry) } // Delete deletes the metric where the variable labels are the same as those @@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool { return false } - return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) + return m.deleteByHashWithLabels(h, labels, m.curry) } // DeletePartialMatch deletes all metrics where the variable labels contain all of those @@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int { labels, closer := constrainLabels(m.desc, labels) 
defer closer() - return m.metricMap.deleteByLabels(labels, m.curry) + return m.deleteByLabels(labels, m.curry) } // Without explicit forwarding of Describe, Collect, Reset, those methods won't @@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil + return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil } // GetMetricWith returns the Metric for the given Labels map (the label names @@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil + return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil } func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { diff --git a/openshift/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/openshift/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 25da157f15..2ed1285068 100644 --- a/openshift/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/openshift/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer { // metric names that are standardized across applications, as that would break // horizontal monitoring, for example the metrics provided by the Go collector // (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with “go_” or “process_”, +// fact, those metrics are already prefixed with "go_" or "process_", // respectively.) // // Conflicts between Collectors registered through the original Registerer with @@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { } } +// WrapCollectorWith returns a Collector wrapping the provided Collector. The +// wrapped Collector will add the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. +// +// WrapCollectorWith can be useful to work with multiple instances of a third +// party library that does not expose enough flexibility on the lifecycle of its +// registered metrics. +// For example, let's say you have a foo.New(reg Registerer) constructor that +// registers metrics but never unregisters them, and you want to create multiple +// instances of foo.Foo with different labels. +// The way to achieve that, is to create a new Registry, pass it to foo.New, +// then use WrapCollectorWith to wrap that Registry with the desired labels and +// register that as a collector in your main Registry. +// Then you can un-register the wrapped collector effectively un-registering the +// metrics registered by foo.New. +func WrapCollectorWith(labels Labels, c Collector) Collector { + return &wrappingCollector{ + wrappedCollector: c, + labels: labels, + } +} + +// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The +// wrapped Collector will add the provided prefix to the name of all Metrics it collects. +// +// See the documentation of WrapCollectorWith for more details on the use case. 
+func WrapCollectorWithPrefix(prefix string, c Collector) Collector { + return &wrappingCollector{ + wrappedCollector: c, + prefix: prefix, + } +} + type wrappingRegisterer struct { wrappedRegisterer Registerer prefix string diff --git a/openshift/vendor/github.com/prometheus/common/expfmt/decode.go b/openshift/vendor/github.com/prometheus/common/expfmt/decode.go index 1448439b7f..7b762370e2 100644 --- a/openshift/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/openshift/vendor/github.com/prometheus/common/expfmt/decode.go @@ -70,19 +70,34 @@ func ResponseFormat(h http.Header) Format { return FmtUnknown } -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. +// NewDecoder returns a new decoder based on the given input format. Metric +// names are validated based on the provided Format -- if the format requires +// escaping, traditional Prometheus validity checking is used. Otherwise, names +// are checked for UTF-8 validity. Supported formats include delimited protobuf +// and Prometheus text format. For historical reasons, this decoder fallbacks +// to classic text decoding for any other format. This decoder does not fully +// support OpenMetrics although it may often succeed due to the similarities +// between the formats. This decoder may not support the latest features of +// Prometheus text format and is not intended for high-performance applications. +// See: https://github.com/prometheus/common/issues/812 func NewDecoder(r io.Reader, format Format) Decoder { + scheme := model.LegacyValidation + if format.ToEscapingScheme() == model.NoEscaping { + scheme = model.UTF8Validation + } switch format.FormatType() { case TypeProtoDelim: - return &protoDecoder{r: bufio.NewReader(r)} + return &protoDecoder{r: bufio.NewReader(r), s: scheme} + case TypeProtoText, TypeProtoCompact: + return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)} } - return &textDecoder{r: r} + return &textDecoder{r: r, s: scheme} } // protoDecoder implements the Decoder interface for protocol buffers. type protoDecoder struct { r protodelim.Reader + s model.ValidationScheme } // Decode implements the Decoder interface. @@ -93,7 +108,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + if !d.s.IsValidMetricName(v.GetName()) { return fmt.Errorf("invalid metric name %q", v.GetName()) } for _, m := range v.GetMetric() { @@ -107,7 +122,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if !model.LabelValue(l.GetValue()).IsValid() { return fmt.Errorf("invalid label value %q", l.GetValue()) } - if !model.LabelName(l.GetName()).IsValid() { + if !d.s.IsValidLabelName(l.GetName()) { return fmt.Errorf("invalid label name %q", l.GetName()) } } @@ -115,10 +130,20 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { return nil } +// errDecoder is an error-state decoder that always returns the same error. +type errDecoder struct { + err error +} + +func (d *errDecoder) Decode(*dto.MetricFamily) error { + return d.err +} + // textDecoder implements the Decoder interface for the text protocol. 
type textDecoder struct { r io.Reader fams map[string]*dto.MetricFamily + s model.ValidationScheme err error } @@ -126,7 +151,7 @@ type textDecoder struct { func (d *textDecoder) Decode(v *dto.MetricFamily) error { if d.err == nil { // Read all metrics in one shot. - var p TextParser + p := NewTextParser(d.s) d.fams, d.err = p.TextToMetricFamilies(d.r) // If we don't get an error, store io.EOF for the end. if d.err == nil { diff --git a/openshift/vendor/github.com/prometheus/common/expfmt/encode.go b/openshift/vendor/github.com/prometheus/common/expfmt/encode.go index d7f3d76f55..73c24dfbc9 100644 --- a/openshift/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/openshift/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,14 +18,12 @@ import ( "io" "net/http" + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" "github.com/prometheus/common/model" - - "github.com/munnerz/goautoneg" - - dto "github.com/prometheus/client_model/go" ) // Encoder types encode metric families into an underlying wire protocol. @@ -61,7 +59,7 @@ func (ec encoderCloser) Close() error { // appropriate accepted type is found, FmtText is returned (which is the // Prometheus text format). This function will never negotiate FmtOpenMetrics, // as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. +// FmtOpenMetrics, use NegotiateIncludingOpenMetrics. func Negotiate(h http.Header) Format { escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { @@ -153,7 +151,7 @@ func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := protodelim.MarshalTo(w, v) + _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, diff --git a/openshift/vendor/github.com/prometheus/common/expfmt/expfmt.go b/openshift/vendor/github.com/prometheus/common/expfmt/expfmt.go index b26886560d..c34c7de432 100644 --- a/openshift/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/openshift/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -36,9 +36,11 @@ const ( ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + //nolint:revive // Allow for underscores. OpenMetricsVersion_0_0_1 = "0.0.1" + //nolint:revive // Allow for underscores. OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. Do not do direct @@ -54,8 +56,10 @@ const ( // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. 
+ //nolint:revive // Allow for underscores. FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) @@ -188,8 +192,8 @@ func (f Format) FormatType() FormatType { // Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid // "escaping" term exists, that will be used. Otherwise, the global default will // be returned. -func (format Format) ToEscapingScheme() model.EscapingScheme { - for _, p := range strings.Split(string(format), ";") { +func (f Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(f), ";") { toks := strings.Split(p, "=") if len(toks) != 2 { continue diff --git a/openshift/vendor/github.com/prometheus/common/expfmt/fuzz.go b/openshift/vendor/github.com/prometheus/common/expfmt/fuzz.go index dfac962a4e..0290f6abc4 100644 --- a/openshift/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/openshift/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -17,7 +17,11 @@ package expfmt -import "bytes" +import ( + "bytes" + + "github.com/prometheus/common/model" +) // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // @@ -26,9 +30,8 @@ import "bytes" // // Further input samples should go in the folder fuzz/corpus. func Fuzz(in []byte) int { - parser := TextParser{} + parser := NewTextParser(model.UTF8Validation) _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - if err != nil { return 0 } diff --git a/openshift/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/openshift/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index a21ed4ec1f..8dbf6d04ed 100644 --- a/openshift/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/openshift/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,10 @@ import ( "strconv" "strings" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/types/known/timestamppb" "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" ) type encoderOption struct { @@ -249,7 +248,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E // Finally the samples, one line for each. if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { - compliantName = compliantName + "_total" + compliantName += "_total" } for _, metric := range in.Metric { switch metricType { @@ -477,7 +476,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -641,11 +640,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - err = (*e).Timestamp.CheckValid() + err = e.Timestamp.CheckValid() if err != nil { return written, err } - ts := (*e).Timestamp.AsTime() + ts := e.Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. 
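The expfmt decoder changes above are worth calling out: NewDecoder now derives a name-validation scheme from the negotiated Format, so formats that still require escaping get the traditional Prometheus name checks, while escaping=allow-utf-8 formats only require valid UTF-8. A minimal usage sketch, not part of the patch (the payload and format choice are illustrative):

	package main

	import (
		"fmt"
		"io"
		"strings"

		dto "github.com/prometheus/client_model/go"
		"github.com/prometheus/common/expfmt"
	)

	func main() {
		payload := "# TYPE http_requests_total counter\nhttp_requests_total{code=\"200\"} 1027\n"

		// With the library's default escaping configuration, the plain text
		// format selects legacy name validation inside the decoder.
		dec := expfmt.NewDecoder(strings.NewReader(payload), expfmt.NewFormat(expfmt.TypeTextPlain))

		var mf dto.MetricFamily
		for {
			err := dec.Decode(&mf)
			if err == io.EOF {
				break
			}
			if err != nil {
				panic(err)
			}
			fmt.Println(mf.GetName(), len(mf.GetMetric()))
		}
	}

Note that TypeProtoText and TypeProtoCompact now yield an error-state decoder instead of silently falling back to text parsing.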
diff --git a/openshift/vendor/github.com/prometheus/common/expfmt/text_create.go b/openshift/vendor/github.com/prometheus/common/expfmt/text_create.go index 4b86434b33..c4e9c1bbc3 100644 --- a/openshift/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/openshift/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -22,9 +22,9 @@ import ( "strings" "sync" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" ) // enhancedWriter has all the enhanced write functions needed here. bufio.Writer @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(name) { + if model.LegacyValidation.IsValidMetricName(name) { return w.WriteString(name) } var written int diff --git a/openshift/vendor/github.com/prometheus/common/expfmt/text_parse.go b/openshift/vendor/github.com/prometheus/common/expfmt/text_parse.go index b4607fe4d2..8f2edde324 100644 --- a/openshift/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/openshift/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -78,6 +78,14 @@ type TextParser struct { // These indicate if the metric name from the current line being parsed is inside // braces and if that metric name was found respectively. currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool + // scheme sets the desired ValidationScheme for names. Defaults to the invalid + // UnsetValidation. + scheme model.ValidationScheme +} + +// NewTextParser returns a new TextParser with the provided nameValidationScheme. +func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser { + return TextParser{scheme: nameValidationScheme} } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -126,6 +134,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF func (p *TextParser) reset(in io.Reader) { p.metricFamiliesByName = map[string]*dto.MetricFamily{} + p.currentLabelPairs = nil if p.buf == nil { p.buf = bufio.NewReader(in) } else { @@ -216,6 +225,9 @@ func (p *TextParser) startComment() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -244,6 +256,9 @@ func (p *TextParser) readingMetricName() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } // Now is the time to fix the type if it hasn't happened yet. 
if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() @@ -311,6 +326,9 @@ func (p *TextParser) startLabelName() stateFn { switch p.currentByte { case ',': p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -319,6 +337,10 @@ func (p *TextParser) startLabelName() stateFn { return p.startLabelName case '}': p.setOrCreateCurrentMF() + if p.err != nil { + p.currentLabelPairs = nil + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -341,25 +363,30 @@ func (p *TextParser) startLabelName() stateFn { p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + p.currentLabelPairs = nil + return nil + } + if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) { + p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName())) + p.currentLabelPairs = nil return nil } // Special summary/histogram treatment. Don't add 'quantile' and 'le' // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && + (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) for _, l := range p.currentLabelPairs { lName := l.GetName() - if _, exists := labels[lName]; !exists { - labels[lName] = struct{}{} - } else { + if _, exists := labels[lName]; exists { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) p.currentLabelPairs = nil return nil } + labels[lName] = struct{}{} } return p.startLabelValue } @@ -440,7 +467,8 @@ func (p *TextParser) readingValue() stateFn { // When we are here, we have read all the labels, so for the // special case of a summary/histogram, we can finally find out // if the metric already exists. 
- if p.currentMF.GetType() == dto.MetricType_SUMMARY { + switch p.currentMF.GetType() { + case dto.MetricType_SUMMARY: signature := model.LabelsToSignature(p.currentLabels) if summary := p.summaries[signature]; summary != nil { p.currentMetric = summary @@ -448,7 +476,7 @@ func (p *TextParser) readingValue() stateFn { p.summaries[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + case dto.MetricType_HISTOGRAM: signature := model.LabelsToSignature(p.currentLabels) if histogram := p.histograms[signature]; histogram != nil { p.currentMetric = histogram @@ -456,7 +484,7 @@ func (p *TextParser) readingValue() stateFn { p.histograms[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else { + default: p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } if p.readTokenUntilWhitespace(); p.err != nil { @@ -805,6 +833,10 @@ func (p *TextParser) setOrCreateCurrentMF() { p.currentIsHistogramCount = false p.currentIsHistogramSum = false name := p.currentToken.String() + if !p.scheme.IsValidMetricName(name) { + p.parseError(fmt.Sprintf("invalid metric name %q", name)) + return + } if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { return } diff --git a/openshift/vendor/github.com/prometheus/common/model/alert.go b/openshift/vendor/github.com/prometheus/common/model/alert.go index bd3a39e3e1..460f554f29 100644 --- a/openshift/vendor/github.com/prometheus/common/model/alert.go +++ b/openshift/vendor/github.com/prometheus/common/model/alert.go @@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool { return a.ResolvedAt(time.Now()) } -// ResolvedAt returns true off the activity interval ended before +// ResolvedAt returns true iff the activity interval ended before // the given timestamp. func (a *Alert) ResolvedAt(ts time.Time) bool { if a.EndsAt.IsZero() { diff --git a/openshift/vendor/github.com/prometheus/common/model/labels.go b/openshift/vendor/github.com/prometheus/common/model/labels.go index 73b7aa3e60..dfeb34be5f 100644 --- a/openshift/vendor/github.com/prometheus/common/model/labels.go +++ b/openshift/vendor/github.com/prometheus/common/model/labels.go @@ -22,7 +22,7 @@ import ( ) const ( - // AlertNameLabel is the name of the label containing the an alert's name. + // AlertNameLabel is the name of the label containing the alert's name. AlertNameLabel = "alertname" // ExportedLabelPrefix is the prefix to prepend to the label names present in @@ -32,6 +32,12 @@ const ( // MetricNameLabel is the label name indicating the metric name of a // timeseries. MetricNameLabel = "__name__" + // MetricTypeLabel is the label name indicating the metric type of + // timeseries as per the PROM-39 proposal. + MetricTypeLabel = "__type__" + // MetricUnitLabel is the label name indicating the metric unit of + // timeseries as per the PROM-39 proposal. + MetricUnitLabel = "__unit__" // SchemeLabel is the name of the label that holds the scheme on which to // scrape a target. @@ -100,33 +106,21 @@ type LabelName string // IsValid returns true iff the name matches the pattern of LabelNameRE when // NameValidationScheme is set to LegacyValidation, or valid UTF-8 if // NameValidationScheme is set to UTF8Validation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [ValidationScheme.IsValidLabelName] instead. 
func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - switch NameValidationScheme { - case LegacyValidation: - return ln.IsValidLegacy() - case UTF8Validation: - return utf8.ValidString(string(ln)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidLabelName(string(ln)) } // IsValidLegacy returns true iff name matches the pattern of LabelNameRE for // legacy names. It does not use LabelNameRE for the check but a much faster // hardcoded implementation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [LegacyValidation.IsValidLabelName] instead. func (ln LabelName) IsValidLegacy() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true + return LegacyValidation.IsValidLabelName(string(ln)) } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/openshift/vendor/github.com/prometheus/common/model/labelset.go b/openshift/vendor/github.com/prometheus/common/model/labelset.go index d0ad88da33..9de47b2568 100644 --- a/openshift/vendor/github.com/prometheus/common/model/labelset.go +++ b/openshift/vendor/github.com/prometheus/common/model/labelset.go @@ -114,10 +114,10 @@ func (ls LabelSet) Clone() LabelSet { } // Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) +func (ls LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(ls)) - for k, v := range l { + for k, v := range ls { result[k] = v } @@ -140,7 +140,7 @@ func (ls LabelSet) FastFingerprint() Fingerprint { } // UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { +func (ls *LabelSet) UnmarshalJSON(b []byte) error { var m map[LabelName]LabelValue if err := json.Unmarshal(b, &m); err != nil { return err @@ -153,6 +153,6 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error { return fmt.Errorf("%q is not a valid label name", ln) } } - *l = LabelSet(m) + *ls = LabelSet(m) return nil } diff --git a/openshift/vendor/github.com/prometheus/common/model/metric.go b/openshift/vendor/github.com/prometheus/common/model/metric.go index 5766107cf9..3feebf328a 100644 --- a/openshift/vendor/github.com/prometheus/common/model/metric.go +++ b/openshift/vendor/github.com/prometheus/common/model/metric.go @@ -14,6 +14,7 @@ package model import ( + "encoding/json" "errors" "fmt" "regexp" @@ -23,17 +24,30 @@ import ( "unicode/utf8" dto "github.com/prometheus/client_model/go" + "go.yaml.in/yaml/v2" "google.golang.org/protobuf/proto" ) var ( - // NameValidationScheme determines the method of name validation to be used by - // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 - // mode in isolation from other components that don't support UTF-8 may result - // in bugs or other undefined behavior. This value can be set to - // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To - // avoid need for locking, this value should be set once, ideally in an - // init(), before multiple goroutines are started. + // NameValidationScheme determines the global default method of the name + // validation to be used by all calls to IsValidMetricName() and LabelName + // IsValid(). 
+ // + // Deprecated: This variable should not be used and might be removed in the + // far future. If you wish to stick to the legacy name validation use + // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods + // instead. This variable is here as an escape hatch for emergency cases, + // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., + // to delay UTF-8 migrations in time or aid in debugging unforeseen results of + // the change. In such a case, a temporary assignment to `LegacyValidation` + // value in the `init()` function in your main.go or so, could be considered. + // + // Historically we opted for a global variable for feature gating different + // validation schemes in operations that were not otherwise easily adjustable + // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate + // Labels structure or package might have been a better choice. Given the + // change was made and many upgraded the common already, we live this as-is + // with this warning and learning for the future. NameValidationScheme = UTF8Validation // NameEscapingScheme defines the default way that names will be escaped when @@ -50,16 +64,151 @@ var ( type ValidationScheme int const ( - // LegacyValidation is a setting that requirets that metric and label names + // UnsetValidation represents an undefined ValidationScheme. + // Should not be used in practice. + UnsetValidation ValidationScheme = iota + + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. - LegacyValidation ValidationScheme = iota + LegacyValidation // UTF8Validation only requires that metric and label names be valid UTF-8 // strings. UTF8Validation ) +var _ interface { + yaml.Marshaler + yaml.Unmarshaler + json.Marshaler + json.Unmarshaler + fmt.Stringer +} = new(ValidationScheme) + +// String returns the string representation of s. +func (s ValidationScheme) String() string { + switch s { + case UnsetValidation: + return "unset" + case LegacyValidation: + return "legacy" + case UTF8Validation: + return "utf8" + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (s ValidationScheme) MarshalYAML() (any, error) { + switch s { + case UnsetValidation: + return "", nil + case LegacyValidation, UTF8Validation: + return s.String(), nil + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error { + var scheme string + if err := unmarshal(&scheme); err != nil { + return err + } + return s.Set(scheme) +} + +// MarshalJSON implements the json.Marshaler interface. +func (s ValidationScheme) MarshalJSON() ([]byte, error) { + switch s { + case UnsetValidation: + return json.Marshal("") + case UTF8Validation, LegacyValidation: + return json.Marshal(s.String()) + default: + return nil, fmt.Errorf("unhandled ValidationScheme: %d", s) + } +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error { + var repr string + if err := json.Unmarshal(bytes, &repr); err != nil { + return err + } + return s.Set(repr) +} + +// Set implements the pflag.Value interface. 
+func (s *ValidationScheme) Set(text string) error { + switch text { + case "": + // Don't change the value. + case LegacyValidation.String(): + *s = LegacyValidation + case UTF8Validation.String(): + *s = UTF8Validation + default: + return fmt.Errorf("unrecognized ValidationScheme: %q", text) + } + return nil +} + +// IsValidMetricName returns whether metricName is valid according to s. +func (s ValidationScheme) IsValidMetricName(metricName string) bool { + switch s { + case LegacyValidation: + if len(metricName) == 0 { + return false + } + for i, b := range metricName { + if !isValidLegacyRune(b, i) { + return false + } + } + return true + case UTF8Validation: + if len(metricName) == 0 { + return false + } + return utf8.ValidString(metricName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String())) + } +} + +// IsValidLabelName returns whether labelName is valid according to s. +func (s ValidationScheme) IsValidLabelName(labelName string) bool { + switch s { + case LegacyValidation: + if len(labelName) == 0 { + return false + } + for i, b := range labelName { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck + return false + } + } + return true + case UTF8Validation: + if len(labelName) == 0 { + return false + } + return utf8.ValidString(labelName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s)) + } +} + +// Type implements the pflag.Value interface. +func (ValidationScheme) Type() string { + return "validationScheme" +} + type EscapingScheme int const ( @@ -89,7 +238,7 @@ const ( // Accept header, the default NameEscapingScheme will be used. EscapingKey = "escaping" - // Possible values for Escaping Key: + // Possible values for Escaping Key. AllowUTF8 = "allow-utf-8" // No escaping required. EscapeUnderscores = "underscores" EscapeDots = "dots" @@ -163,34 +312,22 @@ func (m Metric) FastFingerprint() Fingerprint { // IsValidMetricName returns true iff name matches the pattern of MetricNameRE // for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is // selected. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [ValidationScheme.IsValidMetricName] instead. func IsValidMetricName(n LabelValue) bool { - switch NameValidationScheme { - case LegacyValidation: - return IsValidLegacyMetricName(string(n)) - case UTF8Validation: - if len(n) == 0 { - return false - } - return utf8.ValidString(string(n)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidMetricName(string(n)) } // IsValidLegacyMetricName is similar to IsValidMetricName but always uses the // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [LegacyValidation.IsValidMetricName] instead. 
func IsValidLegacyMetricName(n string) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !isValidLegacyRune(b, i) { - return false - } - } - return true + return LegacyValidation.IsValidMetricName(n) } // EscapeMetricFamily escapes the given metric names and labels with the given @@ -298,13 +435,14 @@ func EscapeName(name string, scheme EscapingScheme) string { case DotsEscaping: // Do not early return for legacy valid names, we still escape underscores. for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if b == '.' { + case b == '.': escaped.WriteString("_dot_") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else { + default: escaped.WriteString("__") } } @@ -315,13 +453,14 @@ func EscapeName(name string, scheme EscapingScheme) string { } escaped.WriteString("U__") for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else if !utf8.ValidRune(b) { + case !utf8.ValidRune(b): escaped.WriteString("_FFFD_") - } else { + default: escaped.WriteRune('_') escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') @@ -333,7 +472,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } } -// lower function taken from strconv.atoi +// lower function taken from strconv.atoi. func lower(c byte) byte { return c | ('x' - 'X') } @@ -397,11 +536,12 @@ func UnescapeName(name string, scheme EscapingScheme) string { } r := lower(escapedName[i]) utf8Val *= 16 - if r >= '0' && r <= '9' { + switch { + case r >= '0' && r <= '9': utf8Val += uint(r) - '0' - } else if r >= 'a' && r <= 'f' { + case r >= 'a' && r <= 'f': utf8Val += uint(r) - 'a' + 10 - } else { + default: return name } i++ diff --git a/openshift/vendor/github.com/prometheus/common/model/time.go b/openshift/vendor/github.com/prometheus/common/model/time.go index 5727452c1e..1730b0fdc1 100644 --- a/openshift/vendor/github.com/prometheus/common/model/time.go +++ b/openshift/vendor/github.com/prometheus/common/model/time.go @@ -126,14 +126,14 @@ func (t *Time) UnmarshalJSON(b []byte) error { p := strings.Split(string(b), ".") switch len(p) { case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } *t = Time(v * second) case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } @@ -143,7 +143,7 @@ func (t *Time) UnmarshalJSON(b []byte) error { if prec < 0 { p[1] = p[1][:dotPrecision] } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) + p[1] += strings.Repeat("0", prec) } va, err := strconv.ParseInt(p[1], 10, 32) @@ -170,15 +170,15 @@ func (t *Time) UnmarshalJSON(b []byte) error { // This type should not propagate beyond the scope of input/output processing. type Duration time.Duration -// Set implements pflag/flag.Value +// Set implements pflag/flag.Value. func (d *Duration) Set(s string) error { var err error *d, err = ParseDuration(s) return err } -// Type implements pflag.Value -func (d *Duration) Type() string { +// Type implements pflag.Value. +func (*Duration) Type() string { return "duration" } @@ -201,6 +201,7 @@ var unitMap = map[string]struct { // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. 
+// Negative durations are not supported. func ParseDuration(s string) (Duration, error) { switch s { case "0": @@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) { return 0, errors.New("duration out of range") } } + return Duration(dur), nil } +// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations. +func ParseDurationAllowNegative(s string) (Duration, error) { + if s == "" || s[0] != '-' { + return ParseDuration(s) + } + + d, err := ParseDuration(s[1:]) + + return -d, err +} + func (d Duration) String() string { var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" + ms = int64(time.Duration(d) / time.Millisecond) + r = "" + sign = "" ) + if ms == 0 { return "0s" } + if ms < 0 { + sign, ms = "-", -ms + } + f := func(unit string, mult int64, exact bool) { if exact && ms%mult != 0 { return @@ -286,7 +305,7 @@ func (d Duration) String() string { f("s", 1000, false) f("ms", 1, false) - return r + return sign + r } // MarshalJSON implements the json.Marshaler interface. diff --git a/openshift/vendor/github.com/prometheus/common/model/value.go b/openshift/vendor/github.com/prometheus/common/model/value.go index 8050637d82..a9995a37ee 100644 --- a/openshift/vendor/github.com/prometheus/common/model/value.go +++ b/openshift/vendor/github.com/prometheus/common/model/value.go @@ -191,7 +191,8 @@ func (ss SampleStream) String() string { } func (ss SampleStream) MarshalJSON() ([]byte, error) { - if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + switch { + case len(ss.Histograms) > 0 && len(ss.Values) > 0: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -202,7 +203,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else if len(ss.Histograms) > 0 { + case len(ss.Histograms) > 0: v := struct { Metric Metric `json:"metric"` Histograms []SampleHistogramPair `json:"histograms"` @@ -211,7 +212,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else { + default: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -258,7 +259,7 @@ func (s Scalar) String() string { // MarshalJSON implements json.Marshaler. func (s Scalar) MarshalJSON() ([]byte, error) { v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) + return json.Marshal([...]interface{}{s.Timestamp, v}) } // UnmarshalJSON implements json.Unmarshaler. 
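To make the model changes above concrete, here is a small sketch, not from the patch, of the ValidationScheme methods that replace the deprecated package-level helpers, together with the new negative-duration parser (names and values are illustrative):

	package main

	import (
		"fmt"

		"github.com/prometheus/common/model"
	)

	func main() {
		// Dotted names are valid UTF-8 but fail the legacy character set.
		fmt.Println(model.UTF8Validation.IsValidMetricName("http.requests"))   // true
		fmt.Println(model.LegacyValidation.IsValidMetricName("http.requests")) // false
		fmt.Println(model.LegacyValidation.IsValidLabelName("instance"))       // true

		// ValidationScheme now satisfies pflag.Value via Set, Type and String.
		var s model.ValidationScheme
		if err := s.Set("legacy"); err != nil {
			panic(err)
		}
		fmt.Println(s) // legacy

		// ParseDurationAllowNegative accepts a leading minus sign, and
		// Duration.String renders the sign back.
		d, err := model.ParseDurationAllowNegative("-1h30m")
		if err != nil {
			panic(err)
		}
		fmt.Println(d) // -1h30m
	}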
@@ -349,9 +350,9 @@ func (m Matrix) Len() int { return len(m) } func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) +func (m Matrix) String() string { + matCp := make(Matrix, len(m)) + copy(matCp, m) sort.Sort(matCp) strs := make([]string, len(matCp)) diff --git a/openshift/vendor/github.com/prometheus/common/model/value_histogram.go b/openshift/vendor/github.com/prometheus/common/model/value_histogram.go index 895e6a3e83..91ce5b7a45 100644 --- a/openshift/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/openshift/vendor/github.com/prometheus/common/model/value_histogram.go @@ -86,22 +86,22 @@ func (s *HistogramBucket) Equal(o *HistogramBucket) bool { return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) } -func (b HistogramBucket) String() string { +func (s HistogramBucket) String() string { var sb strings.Builder - lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 - upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3 + upperInclusive := s.Boundaries == 0 || s.Boundaries == 3 if lowerInclusive { sb.WriteRune('[') } else { sb.WriteRune('(') } - fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper) if upperInclusive { sb.WriteRune(']') } else { sb.WriteRune(')') } - fmt.Fprintf(&sb, ":%v", b.Count) + fmt.Fprintf(&sb, ":%v", s.Count) return sb.String() } diff --git a/openshift/vendor/github.com/prometheus/common/model/value_type.go b/openshift/vendor/github.com/prometheus/common/model/value_type.go index 726c50ee63..078910f46b 100644 --- a/openshift/vendor/github.com/prometheus/common/model/value_type.go +++ b/openshift/vendor/github.com/prometheus/common/model/value_type.go @@ -66,8 +66,8 @@ func (et *ValueType) UnmarshalJSON(b []byte) error { return nil } -func (e ValueType) String() string { - switch e { +func (et ValueType) String() string { + switch et { case ValNone: return "" case ValScalar: diff --git a/openshift/vendor/github.com/prometheus/procfs/.golangci.yml b/openshift/vendor/github.com/prometheus/procfs/.golangci.yml index 126df9e67a..3c3bf910fd 100644 --- a/openshift/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/openshift/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,22 +1,45 @@ ---- +version: "2" linters: enable: - - errcheck - - godot - - gosimple - - govet - - ineffassign - - misspell - - revive - - staticcheck - - testifylint - - unused - -linter-settings: - godot: - capital: true - exclude: - # Ignore "See: URL" - - 'See:' - misspell: - locale: US + - forbidigo + - godot + - misspell + - revive + - testifylint + settings: + forbidigo: + forbid: + - pattern: ^fmt\.Print.*$ + msg: Do not commit print statements. + godot: + exclude: + # Ignore "See: URL". 
+ - 'See:' + capital: true + misspell: + locale: US + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + settings: + goimports: + local-prefixes: + - github.com/prometheus/procfs + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/openshift/vendor/github.com/prometheus/procfs/Makefile.common b/openshift/vendor/github.com/prometheus/procfs/Makefile.common index 1617292350..0ed55c2ba2 100644 --- a/openshift/vendor/github.com/prometheus/procfs/Makefile.common +++ b/openshift/vendor/github.com/prometheus/procfs/Makefile.common @@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') PROMU := $(FIRST_GOPATH)/bin/promu @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.59.0 +GOLANGCI_LINT_VERSION ?= v2.0.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -275,3 +275,9 @@ $(1)_precheck: exit 1; \ fi endef + +govulncheck: install-govulncheck + govulncheck ./... + +install-govulncheck: + command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/openshift/vendor/github.com/prometheus/procfs/README.md b/openshift/vendor/github.com/prometheus/procfs/README.md index 1224816c2a..0718239cf1 100644 --- a/openshift/vendor/github.com/prometheus/procfs/README.md +++ b/openshift/vendor/github.com/prometheus/procfs/README.md @@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`. The procfs library includes a set of test fixtures which include many example files from the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make fixtures/.unpacked` or just `make test`. +ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`. ```bash rm -rf testdata/fixtures make test ``` -Next, make the required changes to the extracted files in the `fixtures` directory. When +Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using `git diff testdata/fixtures.ttar`.
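For context on the `.golangci.yml` migration to the v2 config schema above: the newly enabled `forbidigo` linter bans `fmt.Print*` calls, which is also why the `proc_smaps.go` hunk further down in this patch deletes a stray `fmt.Println`. A tiny, hypothetical illustration of the kind of code the rule flags (the function here is made up, not part of procfs):

```go
package procfs

import "fmt"

// debugDump is an illustrative example only.
func debugDump(v interface{}) {
	// forbidigo (pattern ^fmt\.Print.*$) reports this line with the
	// configured message: "Do not commit print statements."
	fmt.Printf("value: %v\n", v)
}
```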
diff --git a/openshift/vendor/github.com/prometheus/procfs/arp.go b/openshift/vendor/github.com/prometheus/procfs/arp.go index cdcc8a7ccc..2e53344151 100644 --- a/openshift/vendor/github.com/prometheus/procfs/arp.go +++ b/openshift/vendor/github.com/prometheus/procfs/arp.go @@ -23,9 +23,9 @@ import ( // Learned from include/uapi/linux/if_arp.h. const ( - // completed entry (ha valid). + // Completed entry (ha valid). ATFComplete = 0x02 - // permanent entry. + // Permanent entry. ATFPermanent = 0x04 // Publish entry. ATFPublish = 0x08 diff --git a/openshift/vendor/github.com/prometheus/procfs/fs.go b/openshift/vendor/github.com/prometheus/procfs/fs.go index 4980c875bf..9bdaccc7c8 100644 --- a/openshift/vendor/github.com/prometheus/procfs/fs.go +++ b/openshift/vendor/github.com/prometheus/procfs/fs.go @@ -24,8 +24,14 @@ type FS struct { isReal bool } -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = fs.DefaultProcMountPoint +const ( + // DefaultMountPoint is the common mount point of the proc filesystem. + DefaultMountPoint = fs.DefaultProcMountPoint + + // SectorSize represents the size of a sector in bytes. + // It is specific to Linux block I/O operations. + SectorSize = 512 +) // NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. // It will error if the mount point directory can't be read or is a file. diff --git a/openshift/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/openshift/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 134767d69a..1b5bdbdf84 100644 --- a/openshift/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/openshift/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -17,7 +17,7 @@ package procfs // isRealProc returns true on architectures that don't have a Type argument -// in their Statfs_t struct -func isRealProc(mountPoint string) (bool, error) { +// in their Statfs_t struct. 
+func isRealProc(_ string) (bool, error) { return true, nil } diff --git a/openshift/vendor/github.com/prometheus/procfs/fscache.go b/openshift/vendor/github.com/prometheus/procfs/fscache.go index cf2e3eaa03..7db8633077 100644 --- a/openshift/vendor/github.com/prometheus/procfs/fscache.go +++ b/openshift/vendor/github.com/prometheus/procfs/fscache.go @@ -162,7 +162,7 @@ type Fscacheinfo struct { ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 // Number of release reqs ignored due to in-progress store ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores cancelled due to release req + // Number of page stores canceled due to release req PageStoresCancelledByReleaseRequests uint64 VmscanWaiting uint64 // Number of times async ops added to pending queues @@ -171,11 +171,11 @@ type Fscacheinfo struct { OpsRunning uint64 // Number of times async ops queued for processing OpsEnqueued uint64 - // Number of async ops cancelled + // Number of async ops canceled OpsCancelled uint64 // Number of async ops rejected due to object lookup/create failure OpsRejected uint64 - // Number of async ops initialised + // Number of async ops initialized OpsInitialised uint64 // Number of async ops queued for deferred release OpsDeferred uint64 diff --git a/openshift/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/openshift/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 3c18c7610e..3a43e83915 100644 --- a/openshift/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/openshift/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -28,6 +28,9 @@ const ( // DefaultConfigfsMountPoint is the common mount point of the configfs. DefaultConfigfsMountPoint = "/sys/kernel/config" + + // DefaultSelinuxMountPoint is the common mount point of the selinuxfs. + DefaultSelinuxMountPoint = "/sys/fs/selinux" ) // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an diff --git a/openshift/vendor/github.com/prometheus/procfs/internal/util/parse.go b/openshift/vendor/github.com/prometheus/procfs/internal/util/parse.go index 14272dc788..5a7d2df06a 100644 --- a/openshift/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/openshift/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -14,6 +14,7 @@ package util import ( + "errors" "os" "strconv" "strings" @@ -110,3 +111,16 @@ func ParseBool(b string) *bool { } return &truth } + +// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX. +func ReadHexFromFile(path string) (uint64, error) { + data, err := os.ReadFile(path) + if err != nil { + return 0, err + } + hexString := strings.TrimSpace(string(data)) + if !strings.HasPrefix(hexString, "0x") { + return 0, errors.New("invalid format: hex string does not start with '0x'") + } + return strconv.ParseUint(hexString[2:], 16, 64) +} diff --git a/openshift/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/openshift/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index 1ab875ceec..d5404a6d72 100644 --- a/openshift/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/openshift/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -20,6 +20,8 @@ package util import ( "bytes" "os" + "strconv" + "strings" "syscall" ) @@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) { return string(bytes.TrimSpace(b[:n])), nil } + +// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it. 
+func SysReadUintFromFile(path string) (uint64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// SysReadIntFromFile reads a file using SysReadFile and attempts to parse an int64 from it. +func SysReadIntFromFile(path string) (int64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/openshift/vendor/github.com/prometheus/procfs/mountstats.go b/openshift/vendor/github.com/prometheus/procfs/mountstats.go index 75a3b6c810..50caa73274 100644 --- a/openshift/vendor/github.com/prometheus/procfs/mountstats.go +++ b/openshift/vendor/github.com/prometheus/procfs/mountstats.go @@ -45,11 +45,11 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 - // kernel version >= 4.14 MaxLen + // Kernel version >= 4.14 MaxLen // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 fieldTransport11RDMAMaxLen = 28 - // kernel version <= 4.2 MinLen + // Kernel version <= 4.2 MinLen // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 fieldTransport11RDMAMinLen = 20 ) @@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats switch statVersion { case statVersion10: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport10UDPLen - } else { + default: return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { @@ -613,13 +614,14 @@ } case statVersion11: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport11UDPLen - } else if protocol == "rdma" { + case "rdma": expectedLength = fieldTransport11RDMAMinLen - } else { + default: return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || @@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // For the udp RPC transport there is no connection count, connect idle time, // or idle time (fields #3, #4, and #5); all other fields are the same. So // we set them to 0 here. - if protocol == "udp" { + switch protocol { + case "udp": ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } else if protocol == "tcp" { + case "tcp": ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) - } else if protocol == "rdma" { + case "rdma": ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
} diff --git a/openshift/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/openshift/vendor/github.com/prometheus/procfs/net_dev_snmp6.go new file mode 100644 index 0000000000..f50b38e352 --- /dev/null +++ b/openshift/vendor/github.com/prometheus/procfs/net_dev_snmp6.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" +) + +// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<pid>/net/dev_snmp6/. +// The outer map's keys are interface names and the inner map's keys are stat names. +// +// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type. +type NetDevSNMP6 map[string]map[string]uint64 + +// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/ +// directory. +func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6")) } + +// Returns kernel/system statistics read from interface files within the /proc/<pid>/net/dev_snmp6/ +// directory. +func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(p.path("net/dev_snmp6")) } + +// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory. +func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { + netDevSNMP6 := make(NetDevSNMP6) + + // The net/dev_snmp6 folders contain one file per interface + ifaceFiles, err := os.ReadDir(dir) + if err != nil { + // On systems with IPv6 disabled, this directory won't exist. + // Do nothing.
+ if errors.Is(err, os.ErrNotExist) { + return netDevSNMP6, err + } + return netDevSNMP6, err + } + + for _, iFaceFile := range ifaceFiles { + f, err := os.Open(dir + "/" + iFaceFile.Name()) + if err != nil { + return netDevSNMP6, err + } + defer f.Close() + + netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f) + if err != nil { + return netDevSNMP6, err + } + } + + return netDevSNMP6, nil +} + +func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) { + m := make(map[string]uint64) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + key, val := stat[0], stat[1] + + // Expect stat name to contain "6" or be "ifIndex" + if strings.Contains(key, "6") || key == "ifIndex" { + v, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return m, err + } + + m[key] = v + } + } + return m, scanner.Err() +} diff --git a/openshift/vendor/github.com/prometheus/procfs/net_ip_socket.go b/openshift/vendor/github.com/prometheus/procfs/net_ip_socket.go index b70f1fc7a4..19e3378f72 100644 --- a/openshift/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/openshift/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -25,7 +25,7 @@ import ( ) const ( - // readLimit is used by io.LimitReader while reading the content of the + // Maximum size limit used by io.LimitReader while reading the content of the // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic // as each line represents a single used socket. // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. @@ -50,12 +50,12 @@ type ( // UsedSockets shows the total number of parsed lines representing the // number of used sockets. UsedSockets uint64 - // Drops shows the total number of dropped packets of all UPD sockets. + // Drops shows the total number of dropped packets of all UDP sockets. Drops *uint64 } - // netIPSocketLine represents the fields parsed from a single line - // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // A single line parser for fields from /proc/net/{t,u}dp{,6}. + // Fields which are not used by IPSocket are skipped. // Drops is non-nil for udp{,6}, but nil for tcp{,6}. // For the proc file format details, see https://linux.die.net/man/5/proc. 
netIPSocketLine struct { diff --git a/openshift/vendor/github.com/prometheus/procfs/net_protocols.go b/openshift/vendor/github.com/prometheus/procfs/net_protocols.go index b6c77b709f..8d4b1ac05b 100644 --- a/openshift/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/openshift/vendor/github.com/prometheus/procfs/net_protocols.go @@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro if err != nil { return nil, err } - if fields[4] == enabled { + switch fields[4] { + case enabled: line.Pressure = 1 - } else if fields[4] == disabled { + case disabled: line.Pressure = 0 - } else { + default: line.Pressure = -1 } line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) if err != nil { return nil, err } - if fields[6] == enabled { + switch fields[6] { + case enabled: line.Slab = true - } else if fields[6] == disabled { + case disabled: line.Slab = false - } else { + default: return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } for i := 0; i < len(capabilities); i++ { - if capabilities[i] == "y" { + switch capabilities[i] { + case "y": *capabilityFields[i] = true - } else if capabilities[i] == "n" { + case "n": *capabilityFields[i] = false - } else { + default: return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } diff --git a/openshift/vendor/github.com/prometheus/procfs/net_tcp.go b/openshift/vendor/github.com/prometheus/procfs/net_tcp.go index 5277629557..0396d72015 100644 --- a/openshift/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/openshift/vendor/github.com/prometheus/procfs/net_tcp.go @@ -25,24 +25,28 @@ type ( // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCP() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp")) } // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp6")) } // NetTCPSummary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp")) } // NetTCP6Summary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. 
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp6")) } diff --git a/openshift/vendor/github.com/prometheus/procfs/net_unix.go b/openshift/vendor/github.com/prometheus/procfs/net_unix.go index d868cebdaa..d7e0cacb4c 100644 --- a/openshift/vendor/github.com/prometheus/procfs/net_unix.go +++ b/openshift/vendor/github.com/prometheus/procfs/net_unix.go @@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { return &nu, nil } -func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { +func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) { fields := strings.Fields(line) l := len(fields) - if l < min { - return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) + if l < minFields { + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l) } // Field offsets are as follows: @@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, } // Path field is optional. - if l > min { + if l > minFields { // Path occurs at either index 6 or 7 depending on whether inode is // already present. pathIdx := 7 diff --git a/openshift/vendor/github.com/prometheus/procfs/proc.go b/openshift/vendor/github.com/prometheus/procfs/proc.go index 142796368f..368187fa88 100644 --- a/openshift/vendor/github.com/prometheus/procfs/proc.go +++ b/openshift/vendor/github.com/prometheus/procfs/proc.go @@ -37,9 +37,9 @@ type Proc struct { type Procs []Proc var ( - ErrFileParse = errors.New("Error Parsing File") - ErrFileRead = errors.New("Error Reading File") - ErrMountPoint = errors.New("Error Accessing Mount point") + ErrFileParse = errors.New("error parsing file") + ErrFileRead = errors.New("error reading file") + ErrMountPoint = errors.New("error accessing mount point") ) func (p Procs) Len() int { return len(p) } @@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) { if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), "")) if err != nil { return Proc{}, err } diff --git a/openshift/vendor/github.com/prometheus/procfs/proc_cgroup.go b/openshift/vendor/github.com/prometheus/procfs/proc_cgroup.go index daeed7f571..4a64347c03 100644 --- a/openshift/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/openshift/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -24,7 +24,7 @@ import ( ) // Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in // this hierarchy' (where==what path on the specific cgroupfs). 
By prefixing this path with the mount point of diff --git a/openshift/vendor/github.com/prometheus/procfs/proc_io.go b/openshift/vendor/github.com/prometheus/procfs/proc_io.go index 776f349717..d15b66ddb6 100644 --- a/openshift/vendor/github.com/prometheus/procfs/proc_io.go +++ b/openshift/vendor/github.com/prometheus/procfs/proc_io.go @@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) { ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" + "cancelled_write_bytes: %d\n" //nolint:misspell _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) diff --git a/openshift/vendor/github.com/prometheus/procfs/proc_netstat.go b/openshift/vendor/github.com/prometheus/procfs/proc_netstat.go index 8e3ff4d794..4248c1716e 100644 --- a/openshift/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/openshift/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = &value + procNetstat.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = &value + procNetstat.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = &value + procNetstat.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = &value + procNetstat.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = &value + procNetstat.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = &value + procNetstat.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = &value + procNetstat.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = &value + procNetstat.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = &value + procNetstat.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = &value + procNetstat.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = &value + procNetstat.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = &value + procNetstat.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = &value + procNetstat.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = &value + procNetstat.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = &value + procNetstat.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = &value + procNetstat.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = &value + procNetstat.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = &value + procNetstat.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = &value + procNetstat.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = &value + procNetstat.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = &value + procNetstat.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = &value + procNetstat.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = &value + procNetstat.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = &value + procNetstat.TCPRenoRecovery = &value case "TCPSackRecovery": 
- procNetstat.TcpExt.TCPSackRecovery = &value + procNetstat.TCPSackRecovery = &value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = &value + procNetstat.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = &value + procNetstat.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = &value + procNetstat.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = &value + procNetstat.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = &value + procNetstat.TCPFullUndo = &value case "TCPPartialUndo": - procNetstat.TcpExt.TCPPartialUndo = &value + procNetstat.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = &value + procNetstat.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = &value + procNetstat.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = &value + procNetstat.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = &value + procNetstat.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = &value + procNetstat.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = &value + procNetstat.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = &value + procNetstat.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = &value + procNetstat.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = &value + procNetstat.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = &value + procNetstat.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = &value + procNetstat.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = &value + procNetstat.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = &value + procNetstat.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = &value + procNetstat.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = &value + procNetstat.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = &value + procNetstat.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = &value + procNetstat.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = &value + procNetstat.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = &value + procNetstat.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = &value + procNetstat.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = &value + procNetstat.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = &value + procNetstat.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = &value + procNetstat.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = &value + procNetstat.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = &value + procNetstat.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = &value + 
procNetstat.TCPRetransFail = &value case "TCPRcvCoalesce": - procNetstat.TcpExt.TCPRcvCoalesce = &value + procNetstat.TCPRcvCoalesce = &value case "TCPRcvQDrop": - procNetstat.TcpExt.TCPRcvQDrop = &value + procNetstat.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = &value + procNetstat.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = &value + procNetstat.TCPOFODrop = &value case "TCPOFOMerge": - procNetstat.TcpExt.TCPOFOMerge = &value + procNetstat.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = &value + procNetstat.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = &value + procNetstat.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = &value + procNetstat.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = &value + procNetstat.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = &value + procNetstat.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = &value + procNetstat.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = &value + procNetstat.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = &value + procNetstat.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = &value + procNetstat.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value + procNetstat.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = &value + procNetstat.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = &value + procNetstat.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = &value + procNetstat.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = &value + procNetstat.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = &value + procNetstat.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = &value + procNetstat.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = &value + procNetstat.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = &value + procNetstat.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = &value + procNetstat.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = &value + procNetstat.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = &value + procNetstat.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = &value + procNetstat.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = &value + procNetstat.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = &value + procNetstat.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value + procNetstat.TCPACKSkippedFinWait2 = &value case 
"TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = &value + procNetstat.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = &value + procNetstat.TCPACKSkippedChallenge = &value case "TCPWinProbe": - procNetstat.TcpExt.TCPWinProbe = &value + procNetstat.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = &value + procNetstat.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = &value + procNetstat.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = &value + procNetstat.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = &value + procNetstat.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = &value + procNetstat.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = &value + procNetstat.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = &value + procNetstat.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = &value + procNetstat.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = &value + procNetstat.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = &value + procNetstat.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = &value + procNetstat.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = &value + procNetstat.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = &value + procNetstat.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = &value + procNetstat.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = &value + procNetstat.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = &value + procNetstat.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = &value + procNetstat.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = &value + procNetstat.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = &value + procNetstat.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = &value + procNetstat.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = &value + procNetstat.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = &value + procNetstat.ReasmOverlaps = &value } } } diff --git a/openshift/vendor/github.com/prometheus/procfs/proc_smaps.go b/openshift/vendor/github.com/prometheus/procfs/proc_smaps.go index 09060e8208..9a297afcf8 100644 --- a/openshift/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/openshift/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -19,7 +19,6 @@ package procfs import ( "bufio" "errors" - "fmt" "os" "regexp" "strconv" @@ -29,7 +28,7 @@ import ( ) var ( - // match the header line before each mapped zone in `/proc/pid/smaps`. + // Match the header line before each mapped zone in `/proc/pid/smaps`. 
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) @@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { func (s *ProcSMapsRollup) parseLine(line string) error { kv := strings.SplitN(line, ":", 2) if len(kv) != 2 { - fmt.Println(line) return errors.New("invalid net/dev line, missing colon") } diff --git a/openshift/vendor/github.com/prometheus/procfs/proc_snmp.go b/openshift/vendor/github.com/prometheus/procfs/proc_snmp.go index b9d2cf642a..4bdc90b07e 100644 --- a/openshift/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/openshift/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = &value + procSnmp.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = &value + procSnmp.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = &value + procSnmp.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = &value + procSnmp.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = &value + procSnmp.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = &value + procSnmp.ForwDatagrams = &value case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = &value + procSnmp.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = &value + procSnmp.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = &value + procSnmp.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = &value + procSnmp.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = &value + procSnmp.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = &value + procSnmp.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = &value + procSnmp.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = &value + procSnmp.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = &value + procSnmp.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = &value + procSnmp.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = &value + procSnmp.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = &value + procSnmp.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = &value + procSnmp.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = &value + procSnmp.InMsgs = &value case "InErrors": procSnmp.Icmp.InErrors = &value case "InCsumErrors": procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = &value + procSnmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = &value + procSnmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = &value + procSnmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = &value + procSnmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = &value + procSnmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = &value + procSnmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = &value + procSnmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = &value + procSnmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = &value + procSnmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = &value + procSnmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = &value + 
procSnmp.InAddrMaskReps = &value case "OutMsgs": - procSnmp.Icmp.OutMsgs = &value + procSnmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = &value + procSnmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = &value + procSnmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = &value + procSnmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = &value + procSnmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = &value + procSnmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = &value + procSnmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = &value + procSnmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = &value + procSnmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = &value + procSnmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = &value + procSnmp.OutTimestampReps = &value case "OutAddrMasks": - procSnmp.Icmp.OutAddrMasks = &value + procSnmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = &value + procSnmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = &value + procSnmp.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = &value + procSnmp.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = &value + procSnmp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = &value + procSnmp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = &value + procSnmp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = &value + procSnmp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = &value + procSnmp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = &value + procSnmp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = &value + procSnmp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = &value + procSnmp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = &value + procSnmp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = &value + procSnmp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = &value + procSnmp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = &value + procSnmp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = &value + procSnmp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = &value + procSnmp.OutRsts = &value case "InCsumErrors": procSnmp.Tcp.InCsumErrors = &value } diff --git a/openshift/vendor/github.com/prometheus/procfs/proc_snmp6.go b/openshift/vendor/github.com/prometheus/procfs/proc_snmp6.go index 3059cc6a13..fb7fd3995b 100644 --- a/openshift/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/openshift/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = &value + procSnmp6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = &value + procSnmp6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = &value + procSnmp6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = &value + procSnmp6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = &value + procSnmp6.InAddrErrors = &value case "InUnknownProtos": - 
procSnmp6.Ip6.InUnknownProtos = &value + procSnmp6.InUnknownProtos = &value case "InTruncatedPkts": - procSnmp6.Ip6.InTruncatedPkts = &value + procSnmp6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = &value + procSnmp6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = &value + procSnmp6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = &value + procSnmp6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = &value + procSnmp6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = &value + procSnmp6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = &value + procSnmp6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = &value + procSnmp6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = &value + procSnmp6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = &value + procSnmp6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = &value + procSnmp6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = &value + procSnmp6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = &value + procSnmp6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = &value + procSnmp6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = &value + procSnmp6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = &value + procSnmp6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = &value + procSnmp6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = &value + procSnmp6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = &value + procSnmp6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = &value + procSnmp6.OutMcastOctets = &value case "InBcastOctets": - procSnmp6.Ip6.InBcastOctets = &value + procSnmp6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = &value + procSnmp6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = &value + procSnmp6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = &value + procSnmp6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = &value + procSnmp6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = &value + procSnmp6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = &value + procSnmp6.InMsgs = &value case "InErrors": procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = &value + procSnmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = &value + procSnmp6.OutErrors = &value case "InCsumErrors": procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = &value + procSnmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = &value + procSnmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = &value + procSnmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = &value + procSnmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = &value + procSnmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = &value + procSnmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = &value + procSnmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = &value + 
procSnmp6.InGroupMembResponses = &value case "InGroupMembReductions": - procSnmp6.Icmp6.InGroupMembReductions = &value + procSnmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = &value + procSnmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = &value + procSnmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = &value + procSnmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = &value + procSnmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = &value + procSnmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = &value + procSnmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = &value + procSnmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = &value + procSnmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = &value + procSnmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = &value + procSnmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = &value + procSnmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = &value + procSnmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = &value + procSnmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = &value + procSnmp6.OutGroupMembResponses = &value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = &value + procSnmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = &value + procSnmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = &value + procSnmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = &value + procSnmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = &value + procSnmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = &value + procSnmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = &value + procSnmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = &value + procSnmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = &value + procSnmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = &value + procSnmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = &value + procSnmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = &value + procSnmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = &value + procSnmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = &value + procSnmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = &value + procSnmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = &value + procSnmp6.OutType143 = &value } case "Udp6": switch key { @@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "InCsumErrors": procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = &value + procSnmp6.IgnoredMulti = &value } case "UdpLite6": switch key { diff --git 
a/openshift/vendor/github.com/prometheus/procfs/proc_status.go b/openshift/vendor/github.com/prometheus/procfs/proc_status.go index a055197c63..dd8aa56885 100644 --- a/openshift/vendor/github.com/prometheus/procfs/proc_status.go +++ b/openshift/vendor/github.com/prometheus/procfs/proc_status.go @@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt } } case "NSpid": - s.NSpids = calcNSPidsList(vString) + nspids, err := calcNSPidsList(vString) + if err != nil { + return err + } + s.NSpids = nspids case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 { return g } -func calcNSPidsList(nspidsString string) []uint64 { - s := strings.Split(nspidsString, " ") +func calcNSPidsList(nspidsString string) ([]uint64, error) { + s := strings.Split(nspidsString, "\t") var nspids []uint64 for _, nspid := range s { - nspid, _ := strconv.ParseUint(nspid, 10, 64) - if nspid == 0 { - continue + nspid, err := strconv.ParseUint(nspid, 10, 64) + if err != nil { + return nil, err } nspids = append(nspids, nspid) } - return nspids + return nspids, nil } diff --git a/openshift/vendor/github.com/prometheus/procfs/proc_sys.go b/openshift/vendor/github.com/prometheus/procfs/proc_sys.go index 5eefbe2ef8..3810d1ac99 100644 --- a/openshift/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/openshift/vendor/github.com/prometheus/procfs/proc_sys.go @@ -21,7 +21,7 @@ import ( ) func sysctlToPath(sysctl string) string { - return strings.Replace(sysctl, ".", "/", -1) + return strings.ReplaceAll(sysctl, ".", "/") } func (fs FS) SysctlStrings(sysctl string) ([]string, error) { diff --git a/openshift/vendor/github.com/prometheus/procfs/softirqs.go b/openshift/vendor/github.com/prometheus/procfs/softirqs.go index 28708e0745..403e6ae708 100644 --- a/openshift/vendor/github.com/prometheus/procfs/softirqs.go +++ b/openshift/vendor/github.com/prometheus/procfs/softirqs.go @@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { if len(parts) < 2 { continue } - switch { - case parts[0] == "HI:": + switch parts[0] { + case "HI:": perCPU := parts[1:] softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TIMER:": + case "TIMER:": perCPU := parts[1:] softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_TX:": + case "NET_TX:": perCPU := parts[1:] softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_RX:": + case "NET_RX:": perCPU := parts[1:] softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "BLOCK:": + case "BLOCK:": perCPU := parts[1:] softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -109,7 +109,7 @@ func parseSoftirqs(r 
io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "IRQ_POLL:": + case "IRQ_POLL:": perCPU := parts[1:] softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TASKLET:": + case "TASKLET:": perCPU := parts[1:] softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "SCHED:": + case "SCHED:": perCPU := parts[1:] softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "HRTIMER:": + case "HRTIMER:": perCPU := parts[1:] softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "RCU:": + case "RCU:": perCPU := parts[1:] softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { diff --git a/openshift/vendor/github.com/spf13/pflag/README.md b/openshift/vendor/github.com/spf13/pflag/README.md index 7eacc5bdbe..388c4e5ead 100644 --- a/openshift/vendor/github.com/spf13/pflag/README.md +++ b/openshift/vendor/github.com/spf13/pflag/README.md @@ -284,6 +284,34 @@ func main() { } ``` +### Using pflag with go test +`pflag` does not parse the shorthand versions of go test's built-in flags (i.e., those starting with `-test.`). +For more context, see issues [#63](https://github.com/spf13/pflag/issues/63) and [#238](https://github.com/spf13/pflag/issues/238). + +For example, if you use pflag in your `TestMain` function and call `pflag.Parse()` after defining your custom flags, running a test like this: +```bash +go test /your/tests -run ^YourTest -v --your-test-pflags +``` +will result in the `-v` flag being ignored. This happens because of the way pflag handles flag parsing, skipping over go test's built-in shorthand flags. +To work around this, you can use the `ParseSkippedFlags` function, which ensures that go test's flags are parsed separately using the standard flag package.
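For the `TestMain` scenario described above, the wiring might look like the following sketch (the `mypkg_test` package and the `--update` flag are illustrative assumptions; `AddGoFlagSet` and `ParseSkippedFlags` are the vendored APIs shown in this diff):

```go
package mypkg_test

import (
	goflag "flag"
	"os"
	"testing"

	flag "github.com/spf13/pflag"
)

// update is an illustrative custom pflag used by the tests.
var update = flag.Bool("update", false, "rewrite golden files")

func TestMain(m *testing.M) {
	// Expose go test's flags to pflag, parse the -test.* flags with the
	// standard flag package, then parse the remaining flags with pflag.
	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
	if err := flag.ParseSkippedFlags(os.Args[1:], goflag.CommandLine); err != nil {
		os.Exit(2)
	}
	flag.Parse()
	os.Exit(m.Run())
}
```

The upstream example below shows the same calls in a plain `main` function.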
+ +**Example**: You want to parse go test flags that are otherwise ignored by `pflag.Parse()` +```go +import ( + goflag "flag" + "os" + flag "github.com/spf13/pflag" +) + +var ip *int = flag.Int("flagname", 1234, "help message for flagname") + +func main() { + flag.CommandLine.AddGoFlagSet(goflag.CommandLine) + flag.ParseSkippedFlags(os.Args[1:], goflag.CommandLine) + flag.Parse() +} +``` + ## More info You can see the full reference documentation of the pflag package diff --git a/openshift/vendor/github.com/spf13/pflag/bool_func.go b/openshift/vendor/github.com/spf13/pflag/bool_func.go new file mode 100644 index 0000000000..83d77afa89 --- /dev/null +++ b/openshift/vendor/github.com/spf13/pflag/bool_func.go @@ -0,0 +1,40 @@ +package pflag + +// -- func Value +type boolfuncValue func(string) error + +func (f boolfuncValue) Set(s string) error { return f(s) } + +func (f boolfuncValue) Type() string { return "boolfunc" } + +func (f boolfuncValue) String() string { return "" } // same behavior as stdlib 'flag' package + +func (f boolfuncValue) IsBoolFlag() bool { return true } + +// BoolFunc defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed +// on the command line. +func (f *FlagSet) BoolFunc(name string, usage string, fn func(string) error) { + f.BoolFuncP(name, "", usage, fn) +} + +// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolFuncP(name, shorthand string, usage string, fn func(string) error) { + var val Value = boolfuncValue(fn) + flag := f.VarPF(val, name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// BoolFunc defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed +// on the command line. +func BoolFunc(name string, usage string, fn func(string) error) { + CommandLine.BoolFuncP(name, "", usage, fn) +} + +// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash. +func BoolFuncP(name, shorthand string, usage string, fn func(string) error) { + CommandLine.BoolFuncP(name, shorthand, usage, fn) +} diff --git a/openshift/vendor/github.com/spf13/pflag/count.go b/openshift/vendor/github.com/spf13/pflag/count.go index a0b2679f71..d49c0143c1 100644 --- a/openshift/vendor/github.com/spf13/pflag/count.go +++ b/openshift/vendor/github.com/spf13/pflag/count.go @@ -85,7 +85,7 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int { // Count defines a count flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func Count(name string, usage string) *int { return CommandLine.CountP(name, "", usage) } diff --git a/openshift/vendor/github.com/spf13/pflag/errors.go b/openshift/vendor/github.com/spf13/pflag/errors.go new file mode 100644 index 0000000000..ff11b66bef --- /dev/null +++ b/openshift/vendor/github.com/spf13/pflag/errors.go @@ -0,0 +1,149 @@ +package pflag + +import "fmt" + +// notExistErrorMessageType specifies which flavor of "flag does not exist" +// is printed by NotExistError.
This allows the related errors to be grouped +// under a single NotExistError struct without making a breaking change to +// the error message text. +type notExistErrorMessageType int + +const ( + flagNotExistMessage notExistErrorMessageType = iota + flagNotDefinedMessage + flagNoSuchFlagMessage + flagUnknownFlagMessage + flagUnknownShorthandFlagMessage +) + +// NotExistError is the error returned when trying to access a flag that +// does not exist in the FlagSet. +type NotExistError struct { + name string + specifiedShorthands string + messageType notExistErrorMessageType +} + +// Error implements error. +func (e *NotExistError) Error() string { + switch e.messageType { + case flagNotExistMessage: + return fmt.Sprintf("flag %q does not exist", e.name) + + case flagNotDefinedMessage: + return fmt.Sprintf("flag accessed but not defined: %s", e.name) + + case flagNoSuchFlagMessage: + return fmt.Sprintf("no such flag -%v", e.name) + + case flagUnknownFlagMessage: + return fmt.Sprintf("unknown flag: --%s", e.name) + + case flagUnknownShorthandFlagMessage: + c := rune(e.name[0]) + return fmt.Sprintf("unknown shorthand flag: %q in -%s", c, e.specifiedShorthands) + } + + panic(fmt.Errorf("unknown flagNotExistErrorMessageType: %v", e.messageType)) +} + +// GetSpecifiedName returns the name of the flag (without dashes) as it +// appeared in the parsed arguments. +func (e *NotExistError) GetSpecifiedName() string { + return e.name +} + +// GetSpecifiedShortnames returns the group of shorthand arguments +// (without dashes) that the flag appeared within. If the flag was not in a +// shorthand group, this will return an empty string. +func (e *NotExistError) GetSpecifiedShortnames() string { + return e.specifiedShorthands +} + +// ValueRequiredError is the error returned when a flag needs an argument but +// no argument was provided. +type ValueRequiredError struct { + flag *Flag + specifiedName string + specifiedShorthands string +} + +// Error implements error. +func (e *ValueRequiredError) Error() string { + if len(e.specifiedShorthands) > 0 { + c := rune(e.specifiedName[0]) + return fmt.Sprintf("flag needs an argument: %q in -%s", c, e.specifiedShorthands) + } + + return fmt.Sprintf("flag needs an argument: --%s", e.specifiedName) +} + +// GetFlag returns the flag for which the error occurred. +func (e *ValueRequiredError) GetFlag() *Flag { + return e.flag +} + +// GetSpecifiedName returns the name of the flag (without dashes) as it +// appeared in the parsed arguments. +func (e *ValueRequiredError) GetSpecifiedName() string { + return e.specifiedName +} + +// GetSpecifiedShortnames returns the group of shorthand arguments +// (without dashes) that the flag appeared within. If the flag was not in a +// shorthand group, this will return an empty string. +func (e *ValueRequiredError) GetSpecifiedShortnames() string { + return e.specifiedShorthands +} + +// InvalidValueError is the error returned when an invalid value is used +// for a flag. +type InvalidValueError struct { + flag *Flag + value string + cause error +} + +// Error implements error. +func (e *InvalidValueError) Error() string { + flag := e.flag + var flagName string + if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { + flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) + } else { + flagName = fmt.Sprintf("--%s", flag.Name) + } + return fmt.Sprintf("invalid argument %q for %q flag: %v", e.value, flagName, e.cause) +} + +// Unwrap implements errors.Unwrap. 
+func (e *InvalidValueError) Unwrap() error { + return e.cause +} + +// GetFlag returns the flag for which the error occurred. +func (e *InvalidValueError) GetFlag() *Flag { + return e.flag +} + +// GetValue returns the invalid value that was provided. +func (e *InvalidValueError) GetValue() string { + return e.value +} + +// InvalidSyntaxError is the error returned when a bad flag name is passed on +// the command line. +type InvalidSyntaxError struct { + specifiedFlag string +} + +// Error implements error. +func (e *InvalidSyntaxError) Error() string { + return fmt.Sprintf("bad flag syntax: %s", e.specifiedFlag) +} + +// GetSpecifiedFlag returns the exact flag (with dashes) as it +// appeared in the parsed arguments. +func (e *InvalidSyntaxError) GetSpecifiedFlag() string { + return e.specifiedFlag +} diff --git a/openshift/vendor/github.com/spf13/pflag/flag.go b/openshift/vendor/github.com/spf13/pflag/flag.go index 7c058de374..2fd3c57597 100644 --- a/openshift/vendor/github.com/spf13/pflag/flag.go +++ b/openshift/vendor/github.com/spf13/pflag/flag.go @@ -27,23 +27,32 @@ unaffected. Define flags using flag.String(), Bool(), Int(), etc. This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + var ip = flag.Int("flagname", 1234, "help message for flagname") + If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int func init() { flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") } + Or you can create custom flags that satisfy the Value interface (with pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") + For such flags, the default value is just the initial value of the variable. After all flags are defined, call + flag.Parse() + to parse the command line into the defined flags. Flags may then be used directly. If you're using the flags themselves, they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) @@ -54,22 +63,26 @@ The arguments are indexed from 0 through flag.NArg()-1. The pflag package also defines some new functions that are not in flag, that give one-letter shorthands for flags. You can use these by appending 'P' to the name of any function that defines a flag. + var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } flag.VarP(&flagval, "varname", "v", "help message") + Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags. Command line flag syntax: + --flag // boolean flags only --flag=x Unlike the flag package, a single dash before an option means something different than a double dash. Single dashes signify a series of shorthand letters for flags. All but the last shorthand letter must be boolean flags. + // boolean flags -f -abc @@ -124,12 +137,17 @@ const ( PanicOnError ) -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { +// ParseErrorsAllowlist defines the parsing errors that can be ignored +type ParseErrorsAllowlist struct { // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags UnknownFlags bool } +// ParseErrorsWhitelist defines the parsing errors that can be ignored. +// +// Deprecated: use [ParseErrorsAllowlist] instead. This type will be removed in a future release.
+type ParseErrorsWhitelist = ParseErrorsAllowlist + // NormalizedName is a flag name that has been normalized according to rules // for the FlagSet (e.g. making '-' and '_' equivalent). type NormalizedName string @@ -145,8 +163,13 @@ type FlagSet struct { // help/usage messages. SortFlags bool - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist + // ParseErrorsAllowlist is used to configure an allowlist of errors + ParseErrorsAllowlist ParseErrorsAllowlist + + // ParseErrorsWhitelist is used to configure an allowlist of errors. + // + // Deprecated: use [FlagSet.ParseErrorsAllowlist] instead. This field will be removed in a future release. + ParseErrorsWhitelist ParseErrorsAllowlist name string parsed bool @@ -381,7 +404,7 @@ func (f *FlagSet) lookup(name NormalizedName) *Flag { func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { flag := f.Lookup(name) if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s", name) + err := &NotExistError{name: name, messageType: flagNotDefinedMessage} return nil, err } @@ -411,7 +434,7 @@ func (f *FlagSet) ArgsLenAtDash() int { func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) @@ -427,7 +450,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) @@ -441,7 +464,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro func (f *FlagSet) MarkHidden(name string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } flag.Hidden = true return nil @@ -464,18 +487,16 @@ func (f *FlagSet) Set(name, value string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] if !ok { - return fmt.Errorf("no such flag -%v", name) + return &NotExistError{name: name, messageType: flagNoSuchFlagMessage} } err := flag.Value.Set(value) if err != nil { - var flagName string - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) - } else { - flagName = fmt.Sprintf("--%s", flag.Name) + return &InvalidValueError{ + flag: flag, + value: value, + cause: err, } - return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) } if !flag.Changed { @@ -501,7 +522,7 @@ func (f *FlagSet) SetAnnotation(name, key string, values []string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] if !ok { - return fmt.Errorf("no such flag -%v", name) + return &NotExistError{name: name, messageType: flagNoSuchFlagMessage} } if flag.Annotations == nil { flag.Annotations = map[string][]string{} @@ -538,7 +559,7 @@ func (f *FlagSet) PrintDefaults() { func (f *Flag) defaultIsZeroValue() bool { switch f.Value.(type) { case boolFlag: -
return f.DefValue == "false" + return f.DefValue == "false" || f.DefValue == "" case *durationValue: // Beginning in Go 1.7, duration zero values are "0s" return f.DefValue == "0" || f.DefValue == "0s" @@ -551,7 +572,7 @@ func (f *Flag) defaultIsZeroValue() bool { case *intSliceValue, *stringSliceValue, *stringArrayValue: return f.DefValue == "[]" default: - switch f.Value.String() { + switch f.DefValue { case "false": return true case "": @@ -588,8 +609,10 @@ func UnquoteUsage(flag *Flag) (name string, usage string) { name = flag.Value.Type() switch name { - case "bool": + case "bool", "boolfunc": name = "" + case "func": + name = "value" case "float64": name = "float" case "int64": @@ -707,7 +730,7 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { switch flag.Value.Type() { case "string": line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": + case "bool", "boolfunc": if flag.NoOptDefVal != "true" { line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) } @@ -911,12 +934,10 @@ func VarP(value Value, name, shorthand, usage string) { CommandLine.VarP(value, name, shorthand, usage) } -// failf prints to standard error a formatted error and usage message and +// fail prints an error message and usage message to standard error and // returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) +func (f *FlagSet) fail(err error) error { if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -934,9 +955,9 @@ func (f *FlagSet) usage() { } } -//--unknown (args will be empty) -//--unknown --next-flag ... (args will be --next-flag ...) -//--unknown arg ... (args will be arg ...) +// --unknown (args will be empty) +// --unknown --next-flag ... (args will be --next-flag ...) +// --unknown arg ... (args will be arg ...) func stripUnknownFlagValue(args []string) []string { if len(args) == 0 { //--unknown @@ -960,7 +981,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin a = args name := s[2:] if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) + err = f.fail(&InvalidSyntaxError{specifiedFlag: s}) return } @@ -974,6 +995,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin f.usage() return a, ErrHelp case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // --unknown=unknownval arg ... 
// we do not want to lose arg in this case if len(split) >= 2 { @@ -982,7 +1005,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin return stripUnknownFlagValue(a), nil default: - err = f.failf("unknown flag: --%s", name) + err = f.fail(&NotExistError{name: name, messageType: flagUnknownFlagMessage}) return } } @@ -1000,13 +1023,16 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin a = a[1:] } else { // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) + err = f.fail(&ValueRequiredError{ + flag: flag, + specifiedName: name, + }) return } err = fn(flag, value) if err != nil { - f.failf(err.Error()) + f.fail(err) } return } @@ -1014,7 +1040,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { outArgs = args - if strings.HasPrefix(shorthands, "test.") { + if isGotestShorthandFlag(shorthands) { return } @@ -1029,6 +1055,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = ErrHelp return case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // '-f=arg arg ...' // we do not want to lose arg in this case if len(shorthands) > 2 && shorthands[1] == '=' { @@ -1039,7 +1067,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse outArgs = stripUnknownFlagValue(outArgs) return default: - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + err = f.fail(&NotExistError{ + name: string(c), + specifiedShorthands: shorthands, + messageType: flagUnknownShorthandFlagMessage, + }) return } } @@ -1062,7 +1094,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse outArgs = args[1:] } else { // '-f' (arg was required) - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + err = f.fail(&ValueRequiredError{ + flag: flag, + specifiedName: string(c), + specifiedShorthands: shorthands, + }) return } @@ -1072,7 +1108,7 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = fn(flag, value) if err != nil { - f.failf(err.Error()) + f.fail(err) } return } @@ -1135,12 +1171,12 @@ func (f *FlagSet) Parse(arguments []string) error { } f.parsed = true - if len(arguments) < 0 { + f.args = make([]string, 0, len(arguments)) + + if len(arguments) == 0 { return nil } - f.args = make([]string, 0, len(arguments)) - set := func(flag *Flag, value string) error { return f.Set(flag.Name, value) } @@ -1151,7 +1187,10 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - fmt.Println(err) + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) @@ -1177,6 +1216,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) case ContinueOnError: return err case ExitOnError: + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) diff --git a/openshift/vendor/github.com/spf13/pflag/func.go b/openshift/vendor/github.com/spf13/pflag/func.go new file mode 100644 index 0000000000..9f4d88f271 --- /dev/null +++ b/openshift/vendor/github.com/spf13/pflag/func.go @@ -0,0 +1,37 @@ +package pflag + +// -- func Value +type funcValue func(string) error + +func (f funcValue) Set(s string) error { return 
f(s) } + +func (f funcValue) Type() string { return "func" } + +func (f funcValue) String() string { return "" } // same behavior as stdlib 'flag' package + +// Func defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}={value}" (or equivalent) is +// parsed on the command line, with "{value}" as an argument. +func (f *FlagSet) Func(name string, usage string, fn func(string) error) { + f.FuncP(name, "", usage, fn) +} + +// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) FuncP(name string, shorthand string, usage string, fn func(string) error) { + var val Value = funcValue(fn) + f.VarP(val, name, shorthand, usage) +} + +// Func defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}={value}" (or equivalent) is +// parsed on the command line, with "{value}" as an argument. +func Func(name string, usage string, fn func(string) error) { + CommandLine.FuncP(name, "", usage, fn) +} + +// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash. +func FuncP(name, shorthand string, usage string, fn func(string) error) { + CommandLine.FuncP(name, shorthand, usage, fn) +} diff --git a/openshift/vendor/github.com/spf13/pflag/golangflag.go b/openshift/vendor/github.com/spf13/pflag/golangflag.go index d3dd72b7fe..e62eab5381 100644 --- a/openshift/vendor/github.com/spf13/pflag/golangflag.go +++ b/openshift/vendor/github.com/spf13/pflag/golangflag.go @@ -8,8 +8,18 @@ import ( goflag "flag" "reflect" "strings" + "time" ) +// go test flag prefixes +func isGotestFlag(flag string) bool { + return strings.HasPrefix(flag, "-test.") +} + +func isGotestShorthandFlag(flag string) bool { + return strings.HasPrefix(flag, "test.") +} + // flagValueWrapper implements pflag.Value around a flag.Value. The main // difference here is the addition of the Type method that returns a string // name of the type. As this is generally unknown, we approximate that with @@ -103,3 +113,49 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { } f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) } + +// CopyToGoFlagSet will add all current flags to the given Go flag set. +// Deprecation remarks get copied into the usage description. +// Whenever possible, a flag gets added for which Go flags shows +// a proper type in the help message. +func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) { + f.VisitAll(func(flag *Flag) { + usage := flag.Usage + if flag.Deprecated != "" { + usage += " (DEPRECATED: " + flag.Deprecated + ")" + } + + switch value := flag.Value.(type) { + case *stringValue: + newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage) + case *intValue: + newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage) + case *int64Value: + newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage) + case *uintValue: + newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage) + case *uint64Value: + newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage) + case *durationValue: + newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage) + case *float64Value: + newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage) + default: + newSet.Var(flag.Value, flag.Name, usage) + } + }) +} + +// ParseSkippedFlags explicitly parses go test flags (i.e.
the ones starting with '-test.') with goflag.Parse(), +// since by default those are skipped by pflag.Parse(). +// Typical usage example: `ParseSkippedFlags(os.Args[1:], goflag.CommandLine)` +func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error { + var skippedFlags []string + for _, f := range osArgs { + if isGotestFlag(f) { + skippedFlags = append(skippedFlags, f) + } + } + return goFlagSet.Parse(skippedFlags) +} + diff --git a/openshift/vendor/github.com/spf13/pflag/ipnet_slice.go b/openshift/vendor/github.com/spf13/pflag/ipnet_slice.go index 6b541aa879..c6e89da18d 100644 --- a/openshift/vendor/github.com/spf13/pflag/ipnet_slice.go +++ b/openshift/vendor/github.com/spf13/pflag/ipnet_slice.go @@ -73,7 +73,7 @@ func (s *ipNetSliceValue) String() string { func ipNetSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry + // Empty string would cause a slice with one (empty) entry if len(val) == 0 { return []net.IPNet{}, nil } diff --git a/openshift/vendor/github.com/spf13/pflag/string_to_string.go b/openshift/vendor/github.com/spf13/pflag/string_to_string.go index 890a01afc0..1d1e3bf91a 100644 --- a/openshift/vendor/github.com/spf13/pflag/string_to_string.go +++ b/openshift/vendor/github.com/spf13/pflag/string_to_string.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/csv" "fmt" + "sort" "strings" ) @@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string { } func (s *stringToStringValue) String() string { + keys := make([]string, 0, len(*s.value)) + for k := range *s.value { + keys = append(keys, k) + } + sort.Strings(keys) + records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { + for _, k := range keys { + v := (*s.value)[k] records = append(records, k+"="+v) } diff --git a/openshift/vendor/github.com/spf13/pflag/text.go b/openshift/vendor/github.com/spf13/pflag/text.go new file mode 100644 index 0000000000..886d5a3d80 --- /dev/null +++ b/openshift/vendor/github.com/spf13/pflag/text.go @@ -0,0 +1,81 @@ +package pflag + +import ( + "encoding" + "fmt" + "reflect" +) + +// following is copied from go 1.23.4 flag.go +type textValue struct{ p encoding.TextUnmarshaler } + +func newTextValue(val encoding.TextMarshaler, p encoding.TextUnmarshaler) textValue { + ptrVal := reflect.ValueOf(p) + if ptrVal.Kind() != reflect.Ptr { + panic("variable value type must be a pointer") + } + defVal := reflect.ValueOf(val) + if defVal.Kind() == reflect.Ptr { + defVal = defVal.Elem() + } + if defVal.Type() != ptrVal.Type().Elem() { + panic(fmt.Sprintf("default type does not match variable type: %v != %v", defVal.Type(), ptrVal.Type().Elem())) + } + ptrVal.Elem().Set(defVal) + return textValue{p} +} + +func (v textValue) Set(s string) error { + return v.p.UnmarshalText([]byte(s)) +} + +func (v textValue) Get() interface{} { + return v.p +} + +func (v textValue) String() string { + if m, ok := v.p.(encoding.TextMarshaler); ok { + if b, err := m.MarshalText(); err == nil { + return string(b) + } + } + return "" +} + +//end of copy + +func (v textValue) Type() string { + return reflect.ValueOf(v.p).Type().Name() +} + +// GetText sets out, which must implement encoding.TextUnmarshaler, to the value of the flag with the given name +func (f *FlagSet) GetText(name string, out encoding.TextUnmarshaler) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag accessed but not defined: %s", name) + } + if flag.Value.Type() != reflect.TypeOf(out).Name() { + return fmt.Errorf("trying to get %s
value of flag of type %s", reflect.TypeOf(out).Name(), flag.Value.Type()) + } + return out.UnmarshalText([]byte(flag.Value.String())) +} + +// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p. +func (f *FlagSet) TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) { + f.VarP(newTextValue(value, p), name, "", usage) +} + +// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) { + f.VarP(newTextValue(value, p), name, shorthand, usage) +} + +// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p. +func TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) { + CommandLine.VarP(newTextValue(value, p), name, "", usage) +} + +// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash. +func TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) { + CommandLine.VarP(newTextValue(value, p), name, shorthand, usage) +} diff --git a/openshift/vendor/github.com/spf13/pflag/time.go b/openshift/vendor/github.com/spf13/pflag/time.go new file mode 100644 index 0000000000..3dee424791 --- /dev/null +++ b/openshift/vendor/github.com/spf13/pflag/time.go @@ -0,0 +1,124 @@ +package pflag + +import ( + "fmt" + "strings" + "time" +) + +// TimeValue adapts time.Time for use as a flag. +type timeValue struct { + *time.Time + formats []string +} + +func newTimeValue(val time.Time, p *time.Time, formats []string) *timeValue { + *p = val + return &timeValue{ + Time: p, + formats: formats, + } +} + +// Set time.Time value from string based on accepted formats. +func (d *timeValue) Set(s string) error { + s = strings.TrimSpace(s) + for _, f := range d.formats { + v, err := time.Parse(f, s) + if err != nil { + continue + } + *d.Time = v + return nil + } + + formatsString := "" + for i, f := range d.formats { + if i > 0 { + formatsString += ", " + } + formatsString += fmt.Sprintf("`%s`", f) + } + + return fmt.Errorf("invalid time format `%s` must be one of: %s", s, formatsString) +} + +// Type name for time.Time flags. 
+func (d *timeValue) Type() string { + return "time" +} + +func (d *timeValue) String() string { + if d.Time.IsZero() { + return "" + } else { + return d.Time.Format(time.RFC3339Nano) + } +} + +// GetTime returns the time value of a flag with the given name +func (f *FlagSet) GetTime(name string) (time.Time, error) { + flag := f.Lookup(name) + if flag == nil { + err := fmt.Errorf("flag accessed but not defined: %s", name) + return time.Time{}, err + } + + if flag.Value.Type() != "time" { + err := fmt.Errorf("trying to get %s value of flag of type %s", "time", flag.Value.Type()) + return time.Time{}, err + } + + val, ok := flag.Value.(*timeValue) + if !ok { + return time.Time{}, fmt.Errorf("value %s is not a time", flag.Value) + } + + return *val.Time, nil +} + +// TimeVar defines a time.Time flag with specified name, default value, and usage string. +// The argument p points to a time.Time variable in which to store the value of the flag. +func (f *FlagSet) TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) { + f.TimeVarP(p, name, "", value, formats, usage) +} + +// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) { + f.VarP(newTimeValue(value, p, formats), name, shorthand, usage) +} + +// TimeVar defines a time.Time flag with specified name, default value, and usage string. +// The argument p points to a time.Time variable in which to store the value of the flag. +func TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) { + CommandLine.TimeVarP(p, name, "", value, formats, usage) +} + +// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash. +func TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) { + CommandLine.VarP(newTimeValue(value, p, formats), name, shorthand, usage) +} + +// Time defines a time.Time flag with specified name, default value, and usage string. +// The return value is the address of a time.Time variable that stores the value of the flag. +func (f *FlagSet) Time(name string, value time.Time, formats []string, usage string) *time.Time { + return f.TimeP(name, "", value, formats, usage) +} + +// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time { + p := new(time.Time) + f.TimeVarP(p, name, shorthand, value, formats, usage) + return p +} + +// Time defines a time.Time flag with specified name, default value, and usage string. +// The return value is the address of a time.Time variable that stores the value of the flag. +func Time(name string, value time.Time, formats []string, usage string) *time.Time { + return CommandLine.TimeP(name, "", value, formats, usage) +} + +// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash.
+func TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time { + return CommandLine.TimeP(name, shorthand, value, formats, usage) +} diff --git a/openshift/vendor/go.uber.org/automaxprocs/.codecov.yml b/openshift/vendor/go.uber.org/automaxprocs/.codecov.yml deleted file mode 100644 index 9a2ed4a996..0000000000 --- a/openshift/vendor/go.uber.org/automaxprocs/.codecov.yml +++ /dev/null @@ -1,14 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 90% # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure diff --git a/openshift/vendor/go.uber.org/automaxprocs/.gitignore b/openshift/vendor/go.uber.org/automaxprocs/.gitignore deleted file mode 100644 index dd7bcf5130..0000000000 --- a/openshift/vendor/go.uber.org/automaxprocs/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -vendor - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -*.pprof -*.out -*.log -coverage.txt - -/bin -cover.out -cover.html diff --git a/openshift/vendor/go.uber.org/automaxprocs/CHANGELOG.md b/openshift/vendor/go.uber.org/automaxprocs/CHANGELOG.md deleted file mode 100644 index f421056ae8..0000000000 --- a/openshift/vendor/go.uber.org/automaxprocs/CHANGELOG.md +++ /dev/null @@ -1,52 +0,0 @@ -# Changelog - -## v1.6.0 (2024-07-24) - -- Add RoundQuotaFunc option that allows configuration of rounding - behavior for floating point CPU quota. - -## v1.5.3 (2023-07-19) - -- Fix mountinfo parsing when super options have fields with spaces. -- Fix division by zero while parsing cgroups. - -## v1.5.2 (2023-03-16) - -- Support child control cgroups -- Fix file descriptor leak -- Update dependencies - -## v1.5.1 (2022-04-06) - -- Fix cgroups v2 mountpoint detection. - -## v1.5.0 (2022-04-05) - -- Add support for cgroups v2. - -Thanks to @emadolsky for their contribution to this release. - -## v1.4.0 (2021-02-01) - -- Support colons in cgroup names. -- Remove linters from runtime dependencies. - -## v1.3.0 (2020-01-23) - -- Migrate to Go modules. - -## v1.2.0 (2018-02-22) - -- Fixed quota clamping to always round down rather than up; Rather than - guaranteeing constant throttling at saturation, instead assume that the - fractional CPU was added as a hedge for factors outside of Go's scheduler. - -## v1.1.0 (2017-11-10) - -- Log the new value of `GOMAXPROCS` rather than the current value. -- Make logs more explicit about whether `GOMAXPROCS` was modified or not. -- Allow customization of the minimum `GOMAXPROCS`, and modify default from 2 to 1. - -## v1.0.0 (2017-08-09) - -- Initial release. 
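The automaxprocs files below are dropped from the vendor tree wholesale. For reference, consumers of the package either relied on the blank import shown in its README or called the `maxprocs` subpackage explicitly; a minimal sketch of the explicit form, using only options defined in the deleted sources (the `Min(2)` value is an illustrative assumption):

```go
package main

import (
	"log"

	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// Align GOMAXPROCS with the container CPU quota. Set returns an undo
	// function that restores the previous value (a no-op on error).
	undo, err := maxprocs.Set(maxprocs.Logger(log.Printf), maxprocs.Min(2))
	defer undo()
	if err != nil {
		log.Printf("failed to set GOMAXPROCS: %v", err)
	}
	// ... application logic ...
}
```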
diff --git a/openshift/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md b/openshift/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md deleted file mode 100644 index e327d9aa5c..0000000000 --- a/openshift/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,75 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, -body size, disability, ethnicity, gender identity and expression, level of -experience, nationality, personal appearance, race, religion, or sexual -identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an -appointed representative at an online or offline event. Representation of a -project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at oss-conduct@uber.com. The project -team will review and investigate all complaints, and will respond in a way -that it deems appropriate to the circumstances. The project team is obligated -to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 1.4, available at -[http://contributor-covenant.org/version/1/4][version]. 
- -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/openshift/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md b/openshift/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md deleted file mode 100644 index 2b6a6040d7..0000000000 --- a/openshift/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md +++ /dev/null @@ -1,81 +0,0 @@ -# Contributing - -We'd love your help improving this package! - -If you'd like to add new exported APIs, please [open an issue][open-issue] -describing your proposal — discussing API changes ahead of time makes -pull request review much smoother. In your issue, pull request, and any other -communications, please remember to treat your fellow contributors with -respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. - -Note that you'll need to sign [Uber's Contributor License Agreement][cla] -before we can accept any of your contributions. If necessary, a bot will remind -you to accept the CLA when you open your pull request. - -## Setup - -[Fork][fork], then clone the repository: - -``` -mkdir -p $GOPATH/src/go.uber.org -cd $GOPATH/src/go.uber.org -git clone git@github.com:your_github_username/automaxprocs.git -cd automaxprocs -git remote add upstream https://github.com/uber-go/automaxprocs.git -git fetch upstream -``` - -Install the test dependencies: - -``` -make dependencies -``` - -Make sure that the tests and the linters pass: - -``` -make test -make lint -``` - -If you're not using the minor version of Go specified in the Makefile's -`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is -fine, but it means that you'll only discover lint failures after you open your -pull request. - -## Making Changes - -Start by creating a new branch for your changes: - -``` -cd $GOPATH/src/go.uber.org/automaxprocs -git checkout master -git fetch upstream -git rebase upstream/master -git checkout -b cool_new_feature -``` - -Make your changes, then ensure that `make lint` and `make test` still pass. If -you're satisfied with your changes, push them to your fork. - -``` -git push origin cool_new_feature -``` - -Then use the GitHub UI to open a pull request. - -At this point, you're waiting on us to review your changes. We *try* to respond -to issues and pull requests within a few business days, and we may suggest some -improvements or alternatives. Once your changes are approved, one of the -project maintainers will merge them. - -We're much more likely to approve your changes if you: - -* Add tests for new functionality. -* Write a [good commit message][commit-message]. -* Maintain backward compatibility. - -[fork]: https://github.com/uber-go/automaxprocs/fork -[open-issue]: https://github.com/uber-go/automaxprocs/issues/new -[cla]: https://cla-assistant.io/uber-go/automaxprocs -[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/openshift/vendor/go.uber.org/automaxprocs/Makefile b/openshift/vendor/go.uber.org/automaxprocs/Makefile deleted file mode 100644 index 1642b71480..0000000000 --- a/openshift/vendor/go.uber.org/automaxprocs/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -export GOBIN ?= $(shell pwd)/bin - -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) - -GOLINT = $(GOBIN)/golint -STATICCHECK = $(GOBIN)/staticcheck - -.PHONY: build -build: - go build ./... - -.PHONY: install -install: - go mod download - -.PHONY: test -test: - go test -race ./... 
- -.PHONY: cover -cover: - go test -coverprofile=cover.out -covermode=atomic -coverpkg=./... ./... - go tool cover -html=cover.out -o cover.html - -$(GOLINT): tools/go.mod - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): tools/go.mod - cd tools && go install honnef.co/go/tools/cmd/staticcheck@2023.1.2 - -.PHONY: lint -lint: $(GOLINT) $(STATICCHECK) - @rm -rf lint.log - @echo "Checking gofmt" - @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log - @echo "Checking go vet" - @go vet ./... 2>&1 | tee -a lint.log - @echo "Checking golint" - @$(GOLINT) ./... | tee -a lint.log - @echo "Checking staticcheck" - @$(STATICCHECK) ./... 2>&1 | tee -a lint.log - @echo "Checking for license headers..." - @./.build/check_license.sh | tee -a lint.log - @[ ! -s lint.log ] diff --git a/openshift/vendor/go.uber.org/automaxprocs/README.md b/openshift/vendor/go.uber.org/automaxprocs/README.md deleted file mode 100644 index bfed32adae..0000000000 --- a/openshift/vendor/go.uber.org/automaxprocs/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# automaxprocs [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -Automatically set `GOMAXPROCS` to match Linux container CPU quota. - -## Installation - -`go get -u go.uber.org/automaxprocs` - -## Quick Start - -```go -import _ "go.uber.org/automaxprocs" - -func main() { - // Your application logic here. -} -``` - -# Performance -Data measured from Uber's internal load balancer. We ran the load balancer with 200% CPU quota (i.e., 2 cores): - -| GOMAXPROCS | RPS | P50 (ms) | P99.9 (ms) | -| ------------------ | --------- | -------- | ---------- | -| 1 | 28,893.18 | 1.46 | 19.70 | -| 2 (equal to quota) | 44,715.07 | 0.84 | 26.38 | -| 3 | 44,212.93 | 0.66 | 30.07 | -| 4 | 41,071.15 | 0.57 | 42.94 | -| 8 | 33,111.69 | 0.43 | 64.32 | -| Default (24) | 22,191.40 | 0.45 | 76.19 | - -When `GOMAXPROCS` is increased above the CPU quota, we see P50 decrease slightly, but see significant increases to P99. We also see that the total RPS handled also decreases. - -When `GOMAXPROCS` is higher than the CPU quota allocated, we also saw significant throttling: - -``` -$ cat /sys/fs/cgroup/cpu,cpuacct/system.slice/[...]/cpu.stat -nr_periods 42227334 -nr_throttled 131923 -throttled_time 88613212216618 -``` - -Once `GOMAXPROCS` was reduced to match the CPU quota, we saw no CPU throttling. - -## Development Status: Stable - -All APIs are finalized, and no breaking changes will be made in the 1.x series -of releases. Users of semver-aware dependency management systems should pin -automaxprocs to `^1`. - -## Contributing - -We encourage and support an active, healthy community of contributors — -including you! Details are in the [contribution guide](CONTRIBUTING.md) and -the [code of conduct](CODE_OF_CONDUCT.md). The automaxprocs maintainers keep -an eye on issues and pull requests, but you can also report any negative -conduct to oss-conduct@uber.com. That email list is a private, safe space; -even the automaxprocs maintainers don't have access, so don't hesitate to hold -us to a high standard. - -
- -Released under the [MIT License](LICENSE). - -[doc-img]: https://godoc.org/go.uber.org/automaxprocs?status.svg -[doc]: https://godoc.org/go.uber.org/automaxprocs -[ci-img]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml/badge.svg -[ci]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml -[cov-img]: https://codecov.io/gh/uber-go/automaxprocs/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/automaxprocs - - diff --git a/openshift/vendor/go.uber.org/automaxprocs/automaxprocs.go b/openshift/vendor/go.uber.org/automaxprocs/automaxprocs.go deleted file mode 100644 index 69946a3e1f..0000000000 --- a/openshift/vendor/go.uber.org/automaxprocs/automaxprocs.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package automaxprocs automatically sets GOMAXPROCS to match the Linux -// container CPU quota, if any. -package automaxprocs // import "go.uber.org/automaxprocs" - -import ( - "log" - - "go.uber.org/automaxprocs/maxprocs" -) - -func init() { - maxprocs.Set(maxprocs.Logger(log.Printf)) -} diff --git a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go b/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go deleted file mode 100644 index 113555f63d..0000000000 --- a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package cgroups provides utilities to access Linux control group (CGroups) -// parameters (CPU quota, for example) for a given process. -package cgroups diff --git a/openshift/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/openshift/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go deleted file mode 100644 index e561fe60b2..0000000000 --- a/openshift/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to -// match the configured Linux CPU quota. Unlike the top-level automaxprocs -// package, it lets the caller configure logging and handle errors. -package maxprocs // import "go.uber.org/automaxprocs/maxprocs" - -import ( - "os" - "runtime" - - iruntime "go.uber.org/automaxprocs/internal/runtime" -) - -const _maxProcsKey = "GOMAXPROCS" - -func currentMaxProcs() int { - return runtime.GOMAXPROCS(0) -} - -type config struct { - printf func(string, ...interface{}) - procs func(int, func(v float64) int) (int, iruntime.CPUQuotaStatus, error) - minGOMAXPROCS int - roundQuotaFunc func(v float64) int -} - -func (c *config) log(fmt string, args ...interface{}) { - if c.printf != nil { - c.printf(fmt, args...) - } -} - -// An Option alters the behavior of Set. -type Option interface { - apply(*config) -} - -// Logger uses the supplied printf implementation for log output. By default, -// Set doesn't log anything. -func Logger(printf func(string, ...interface{})) Option { - return optionFunc(func(cfg *config) { - cfg.printf = printf - }) -} - -// Min sets the minimum GOMAXPROCS value that will be used. -// Any value below 1 is ignored. -func Min(n int) Option { - return optionFunc(func(cfg *config) { - if n >= 1 { - cfg.minGOMAXPROCS = n - } - }) -} - -// RoundQuotaFunc sets the function that will be used to covert the CPU quota from float to int. 
-func RoundQuotaFunc(rf func(v float64) int) Option { - return optionFunc(func(cfg *config) { - cfg.roundQuotaFunc = rf - }) -} - -type optionFunc func(*config) - -func (of optionFunc) apply(cfg *config) { of(cfg) } - -// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning -// any error encountered and an undo function. -// -// Set is a no-op on non-Linux systems and in Linux environments without a -// configured CPU quota. -func Set(opts ...Option) (func(), error) { - cfg := &config{ - procs: iruntime.CPUQuotaToGOMAXPROCS, - roundQuotaFunc: iruntime.DefaultRoundFunc, - minGOMAXPROCS: 1, - } - for _, o := range opts { - o.apply(cfg) - } - - undoNoop := func() { - cfg.log("maxprocs: No GOMAXPROCS change to reset") - } - - // Honor the GOMAXPROCS environment variable if present. Otherwise, amend - // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is - // Linux, and guarantee a minimum value of 1. The minimum guaranteed value - // can be overridden using `maxprocs.Min()`. - if max, exists := os.LookupEnv(_maxProcsKey); exists { - cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max) - return undoNoop, nil - } - - maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc) - if err != nil { - return undoNoop, err - } - - if status == iruntime.CPUQuotaUndefined { - cfg.log("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", currentMaxProcs()) - return undoNoop, nil - } - - prev := currentMaxProcs() - undo := func() { - cfg.log("maxprocs: Resetting GOMAXPROCS to %v", prev) - runtime.GOMAXPROCS(prev) - } - - switch status { - case iruntime.CPUQuotaMinUsed: - cfg.log("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs) - case iruntime.CPUQuotaUsed: - cfg.log("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs) - } - - runtime.GOMAXPROCS(maxProcs) - return undo, nil -} diff --git a/openshift/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/openshift/vendor/go.uber.org/automaxprocs/maxprocs/version.go deleted file mode 100644 index cc7fc5aee1..0000000000 --- a/openshift/vendor/go.uber.org/automaxprocs/maxprocs/version.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package maxprocs - -// Version is the current package version. 
-const Version = "1.6.0" diff --git a/openshift/vendor/go.uber.org/mock/mockgen/model/model_gotypes.go b/openshift/vendor/go.uber.org/mock/mockgen/model/model_gotypes.go new file mode 100644 index 0000000000..4596c3d28d --- /dev/null +++ b/openshift/vendor/go.uber.org/mock/mockgen/model/model_gotypes.go @@ -0,0 +1,160 @@ +package model + +import ( + "fmt" + "go/types" +) + +// InterfaceFromGoTypesType returns a pointer to an interface for the +// given interface type loaded from archive. +func InterfaceFromGoTypesType(it *types.Interface) (*Interface, error) { + intf := &Interface{} + + for i := 0; i < it.NumMethods(); i++ { + mt := it.Method(i) + // Skip unexported methods. + if !mt.Exported() { + continue + } + m := &Method{ + Name: mt.Name(), + } + + var err error + m.In, m.Variadic, m.Out, err = funcArgsFromGoTypesType(mt.Type().(*types.Signature)) + if err != nil { + return nil, fmt.Errorf("method %q: %w", mt.Name(), err) + } + + intf.AddMethod(m) + } + + return intf, nil +} + +func funcArgsFromGoTypesType(t *types.Signature) (in []*Parameter, variadic *Parameter, out []*Parameter, err error) { + nin := t.Params().Len() + if t.Variadic() { + nin-- + } + for i := 0; i < nin; i++ { + p, err := parameterFromGoTypesType(t.Params().At(i), false) + if err != nil { + return nil, nil, nil, err + } + in = append(in, p) + } + if t.Variadic() { + p, err := parameterFromGoTypesType(t.Params().At(nin), true) + if err != nil { + return nil, nil, nil, err + } + variadic = p + } + for i := 0; i < t.Results().Len(); i++ { + p, err := parameterFromGoTypesType(t.Results().At(i), false) + if err != nil { + return nil, nil, nil, err + } + out = append(out, p) + } + return +} + +func parameterFromGoTypesType(v *types.Var, variadic bool) (*Parameter, error) { + t := v.Type() + if variadic { + t = t.(*types.Slice).Elem() + } + tt, err := typeFromGoTypesType(t) + if err != nil { + return nil, err + } + return &Parameter{Name: v.Name(), Type: tt}, nil +} + +func typeFromGoTypesType(t types.Type) (Type, error) { + if t, ok := t.(*types.Named); ok { + tn := t.Obj() + if tn.Pkg() == nil { + return PredeclaredType(tn.Name()), nil + } + return &NamedType{ + Package: tn.Pkg().Path(), + Type: tn.Name(), + }, nil + } + + // only unnamed or predeclared types after here + + // Lots of types have element types. Let's do the parsing and error checking for all of them. 
+ var elemType Type + if t, ok := t.(interface{ Elem() types.Type }); ok { + var err error + elemType, err = typeFromGoTypesType(t.Elem()) + if err != nil { + return nil, err + } + } + + switch t := t.(type) { + case *types.Array: + return &ArrayType{ + Len: int(t.Len()), + Type: elemType, + }, nil + case *types.Basic: + return PredeclaredType(t.String()), nil + case *types.Chan: + var dir ChanDir + switch t.Dir() { + case types.RecvOnly: + dir = RecvDir + case types.SendOnly: + dir = SendDir + } + return &ChanType{ + Dir: dir, + Type: elemType, + }, nil + case *types.Signature: + in, variadic, out, err := funcArgsFromGoTypesType(t) + if err != nil { + return nil, err + } + return &FuncType{ + In: in, + Out: out, + Variadic: variadic, + }, nil + case *types.Interface: + if t.NumMethods() == 0 { + return PredeclaredType("interface{}"), nil + } + case *types.Map: + kt, err := typeFromGoTypesType(t.Key()) + if err != nil { + return nil, err + } + return &MapType{ + Key: kt, + Value: elemType, + }, nil + case *types.Pointer: + return &PointerType{ + Type: elemType, + }, nil + case *types.Slice: + return &ArrayType{ + Len: -1, + Type: elemType, + }, nil + case *types.Struct: + if t.NumFields() == 0 { + return PredeclaredType("struct{}"), nil + } + // TODO: UnsafePointer + } + + return nil, fmt.Errorf("can't yet turn %v (%T) into a model.Type", t.String(), t) +} diff --git a/openshift/vendor/go.yaml.in/yaml/v2/.travis.yml b/openshift/vendor/go.yaml.in/yaml/v2/.travis.yml new file mode 100644 index 0000000000..7348c50c0c --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v2/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/openshift/vendor/go.yaml.in/yaml/v2/LICENSE similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE rename to openshift/vendor/go.yaml.in/yaml/v2/LICENSE diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/openshift/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml rename to openshift/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/openshift/vendor/go.yaml.in/yaml/v2/NOTICE similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE rename to openshift/vendor/go.yaml.in/yaml/v2/NOTICE diff --git a/openshift/vendor/go.yaml.in/yaml/v2/README.md b/openshift/vendor/go.yaml.in/yaml/v2/README.md new file mode 100644 index 0000000000..c9388da425 --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. 
Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *go.yaml.in/yaml/v2*. + +To install it, run: + + go get go.yaml.in/yaml/v2 + +API documentation +----------------- + +See: + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! 
+b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/openshift/vendor/go.yaml.in/yaml/v2/apic.go similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go rename to openshift/vendor/go.yaml.in/yaml/v2/apic.go diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/openshift/vendor/go.yaml.in/yaml/v2/decode.go similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go rename to openshift/vendor/go.yaml.in/yaml/v2/decode.go diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/openshift/vendor/go.yaml.in/yaml/v2/emitterc.go similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go rename to openshift/vendor/go.yaml.in/yaml/v2/emitterc.go diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/openshift/vendor/go.yaml.in/yaml/v2/encode.go similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go rename to openshift/vendor/go.yaml.in/yaml/v2/encode.go diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/openshift/vendor/go.yaml.in/yaml/v2/parserc.go similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go rename to openshift/vendor/go.yaml.in/yaml/v2/parserc.go diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/openshift/vendor/go.yaml.in/yaml/v2/readerc.go similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go rename to openshift/vendor/go.yaml.in/yaml/v2/readerc.go diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/openshift/vendor/go.yaml.in/yaml/v2/resolve.go similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go rename to openshift/vendor/go.yaml.in/yaml/v2/resolve.go diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/openshift/vendor/go.yaml.in/yaml/v2/scannerc.go similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go rename to openshift/vendor/go.yaml.in/yaml/v2/scannerc.go diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/openshift/vendor/go.yaml.in/yaml/v2/sorter.go similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go rename to openshift/vendor/go.yaml.in/yaml/v2/sorter.go diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/openshift/vendor/go.yaml.in/yaml/v2/writerc.go similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go rename to openshift/vendor/go.yaml.in/yaml/v2/writerc.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/openshift/vendor/go.yaml.in/yaml/v2/yaml.go similarity index 99% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go rename to openshift/vendor/go.yaml.in/yaml/v2/yaml.go index 30813884c0..5248e1263c 100644 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go +++ b/openshift/vendor/go.yaml.in/yaml/v2/yaml.go @@ -2,7 +2,7 @@ // // Source code and other details for the project are available at GitHub: // -// https://github.com/go-yaml/yaml +// https://github.com/yaml/go-yaml // package yaml diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/openshift/vendor/go.yaml.in/yaml/v2/yamlh.go similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go rename to openshift/vendor/go.yaml.in/yaml/v2/yamlh.go diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go 
b/openshift/vendor/go.yaml.in/yaml/v2/yamlprivateh.go similarity index 100% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go rename to openshift/vendor/go.yaml.in/yaml/v2/yamlprivateh.go diff --git a/openshift/vendor/go.yaml.in/yaml/v3/LICENSE b/openshift/vendor/go.yaml.in/yaml/v3/LICENSE new file mode 100644 index 0000000000..2683e4bb1f --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/LICENSE @@ -0,0 +1,50 @@ +
+This project is covered by two different licenses: MIT and Apache.
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright starting in 2011 when the project was ported over:
+
+ apic.go emitterc.go parserc.go readerc.go scannerc.go
+ writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/openshift/vendor/go.yaml.in/yaml/v3/NOTICE similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE rename to openshift/vendor/go.yaml.in/yaml/v3/NOTICE diff --git a/openshift/vendor/go.yaml.in/yaml/v3/README.md b/openshift/vendor/go.yaml.in/yaml/v3/README.md new file mode 100644 index 0000000000..15a85a6350 --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/README.md @@ -0,0 +1,171 @@ +go.yaml.in/yaml
+===============
+
+YAML Support for the Go Language
+
+
+## Introduction
+
+The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode
+and decode [YAML](https://yaml.org/) values.
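+
+As a quick taste beyond the full example further below, the decoded document
+tree is also reachable directly through the `yaml.Node` type (a sketch; `Kind`,
+`Value` and `Content` are fields of `Node`):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.yaml.in/yaml/v3"
+)
+
+func main() {
+	var doc yaml.Node
+	if err := yaml.Unmarshal([]byte("a: 1\nb: [2, 3]\n"), &doc); err != nil {
+		panic(err)
+	}
+	root := doc.Content[0] // the document's top-level mapping node
+	// Mapping nodes store keys and values alternately in Content.
+	for i := 0; i+1 < len(root.Content); i += 2 {
+		k, v := root.Content[i], root.Content[i+1]
+		fmt.Printf("%s: kind=%d value=%q\n", k.Value, v.Kind, v.Value)
+	}
+}
+```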
+ +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. + + +## Project Status + +This project started as a fork of the extremely popular [go-yaml]( +https://github.com/go-yaml/yaml/) +project, and is being maintained by the official [YAML organization]( +https://github.com/yaml/). + +The YAML team took over ongoing maintenance and development of the project after +discussion with go-yaml's author, @niemeyer, following his decision to +[label the project repository as "unmaintained"]( +https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025. + +We have put together a team of dedicated maintainers including representatives +of go-yaml's most important downstream projects. + +We will strive to earn the trust of the various go-yaml forks to switch back to +this repository as their upstream. + +Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you +would like to contribute or be involved. + + +## Compatibility + +The `yaml` package supports most of YAML 1.2, but preserves some behavior from +1.1 for backwards compatibility. + +Specifically, v3 of the `yaml` package: + +* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being + decoded into a typed bool value. + Otherwise they behave as a string. + Booleans in YAML 1.2 are `true`/`false` only. +* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than + `0o777` as specified in YAML 1.2, because most parsers still use the old + format. + Octals in the `0o777` format are supported though, so new files work. +* Does not support base-60 floats. + These are gone from YAML 1.2, and were actually never supported by this + package as it's clearly a poor choice. + + +## Installation and Usage + +The import path for the package is *go.yaml.in/yaml/v3*. + +To install it, run: + +```bash +go get go.yaml.in/yaml/v3 +``` + + +## API Documentation + +See: + + +## API Stability + +The package API for yaml v3 will remain stable as described in [gopkg.in]( +https://gopkg.in). + + +## Example + +```go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + + +## License + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. 
+Please see the LICENSE file for details. diff --git a/openshift/vendor/go.yaml.in/yaml/v3/apic.go b/openshift/vendor/go.yaml.in/yaml/v3/apic.go new file mode 100644 index 0000000000..05fd305da1 --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. 
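+// Despite the "file" wording, any io.Reader works here; input is then
+// streamed through yaml_reader_read_handler rather than held in memory.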
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. 
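+// The boolean result mirrors the int return of the C libyaml API; these
+// Go ports always succeed and return true.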
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/openshift/vendor/go.yaml.in/yaml/v3/decode.go b/openshift/vendor/go.yaml.in/yaml/v3/decode.go new file mode 100644 index 0000000000..02e2b17bfe --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/decode.go @@ -0,0 +1,1018 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
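+//
+// The parser keeps a single lookahead event in p.event, records anchors in
+// p.anchors so aliases can resolve to earlier nodes, and produces a *Node
+// tree rooted at a DocumentNode.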
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
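+			// Content alternates key and value nodes, so len-3 addresses the
+			// previous entry's key, which is where foot comments are recorded.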
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
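+//
+// The pointer loop below allocates nil pointers as it goes, so a deeply
+// nested pointer type can satisfy Unmarshaler at any level of indirection.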
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
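+		// d.aliases marks anchors that are currently being expanded, so
+		// re-entering one here means the alias refers back to itself.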
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
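+		// Decode into a fresh []interface{} first, then store it back through
+		// the interface once the elements are filled in below.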
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if d.getPossiblyUnhashableKey(mergedFields, ki) { + continue + } + d.setPossiblyUnhashableKey(mergedFields, ki, true) + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := 
settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" 
|| shortTag(n.Tag) == mergeTag) +} diff --git a/openshift/vendor/go.yaml.in/yaml/v3/emitterc.go b/openshift/vendor/go.yaml.in/yaml/v3/emitterc.go new file mode 100644 index 0000000000..ab4e03ba72 --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/emitterc.go @@ -0,0 +1,2054 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
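+// write copies one UTF-8 encoded rune (1-4 bytes) per call, so this advances
+// through s in rune-sized steps rather than byte by byte.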
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. 
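+// Block indentation is kept regular: the current indent is rounded up to the
+// next multiple of best_indent, so with best_indent=2 nesting levels sit at
+// columns 0, 2, 4, and so on.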
+func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool {
+	emitter.indents = append(emitter.indents, emitter.indent)
+	if emitter.indent < 0 {
+		if flow {
+			emitter.indent = emitter.best_indent
+		} else {
+			emitter.indent = 0
+		}
+	} else if !indentless {
+		// [Go] This was changed so that indentations are more regular.
+		if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+			// The first indent inside a sequence will just skip the "- " indicator.
+			emitter.indent += 2
+		} else {
+			// Everything else aligns to the chosen indentation.
+			emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent)
+			if compact_seq {
+				// compact_seq is almost always false; callers pass true only when
+				// increasing the indent for a sequence node. In that case we
+				// subtract 2 so that the leading "- " indicator is counted as
+				// part of the indentation.
+				emitter.indent = emitter.indent - 2
+			}
+		}
+	}
+	return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	switch emitter.state {
+	default:
+	case yaml_EMIT_STREAM_START_STATE:
+		return yaml_emitter_emit_stream_start(emitter, event)
+
+	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, true)
+
+	case yaml_EMIT_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, false)
+
+	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+		return yaml_emitter_emit_document_content(emitter, event)
+
+	case yaml_EMIT_DOCUMENT_END_STATE:
+		return yaml_emitter_emit_document_end(emitter, event)
+
+	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+	case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
+
+	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
+
+	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+	case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
+
+	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
+
+	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if 
!yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
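+// Flow sequences render as "[a, b, c]". In canonical mode each item is
+// written on its own indented line, with a trailing comma before the "]".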
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
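+// Flow mappings render as "{k1: v1, k2: v2}". Keys that fail the simple-key
+// check below are written in explicit form, roughly "{? key : value}".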
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
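+// With emitter.compact_sequence_indent enabled, the "- " indicator counts as
+// part of the indentation, so a sequence nested under a mapping key renders as
+//
+//	key:
+//	- a
+//	- b
+//
+// instead of the default
+//
+//	key:
+//	  - a
+//	  - b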
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// emitter.mapping_context tells us if we are currently in a mapping context.
+		// emitter.column tells us which column of the yaml output we are in; 0 is the first column.
+		// emitter.indention tells us if the last character was an indentation character.
+		// emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements.
+		// So `seq` is true when we are in a mapping context, we are either at the
+		// start of a line or the last character was not an indentation character,
+		// and '- ' is considered part of the indentation for sequence elements.
+		seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
+			emitter.compact_sequence_indent
+		if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if len(emitter.line_comment) > 0 {
+		// [Go] A line comment was provided for the key. That's unusual as the
+		//      scanner associates line comments with the value. Either way,
+		//      save the line comment and render it appropriately later.
+		emitter.key_line_comment = emitter.line_comment
+		emitter.line_comment = nil
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
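+// Simple keys produce the usual "key: value" layout. Keys rejected by
+// yaml_emitter_check_simple_key are emitted in explicit form:
+//
+//	? key
+//	: value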
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. 
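+// Scalar emission runs style selection, anchor and tag processing, and a
+// temporary indent increase so that wrapped scalar lines are indented
+// relative to the scalar itself rather than its parent.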
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. 
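+// A node may stay in simple ("key: value") form only if it is a single-line
+// scalar, an alias, or an empty collection, and its rendered length does not
+// exceed 128 bytes; anything else falls back to the explicit "?" form.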
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. 
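+		// A tag with no matching %TAG handle is written verbatim,
+		// e.g. "!<tag:example.com,2000:app/foo>".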
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+			return false
+		}
+		emitter.tail_comment = emitter.tail_comment[:0]
+		emitter.foot_indent = emitter.indent
+		if emitter.foot_indent < 0 {
+			emitter.foot_indent = 0
+		}
+	}
+
+	if len(emitter.head_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+		return false
+	}
+	emitter.head_comment = emitter.head_comment[:0]
+	return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool {
+	if len(emitter.line_comment) == 0 {
+		// The next 3 lines are needed to resolve an issue with leading newlines.
+		// See https://github.com/go-yaml/yaml/issues/755
+		// When linebreak is set to true, put_break will be called and will add
+		// the needed newline.
+		if linebreak && !put_break(emitter) {
+			return false
+		}
+		return true
+	}
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+		return false
+	}
+	emitter.line_comment = emitter.line_comment[:0]
+	return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.foot_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+		return false
+	}
+	emitter.foot_comment = emitter.foot_comment[:0]
+	emitter.foot_indent = emitter.indent
+	if emitter.foot_indent < 0 {
+		emitter.foot_indent = 0
+	}
+	return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
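+// A valid directive looks like "%TAG !e! tag:example.com,2000:app/": the
+// handle must start and end with "!" and contain only alphanumerical
+// characters in between, and the prefix must be non-empty.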
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if 
!yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/openshift/vendor/go.yaml.in/yaml/v3/encode.go b/openshift/vendor/go.yaml.in/yaml/v3/encode.go new file mode 100644 index 0000000000..de9e72a3e6 --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
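+//
+// This file implements the reflection-driven encoding layer that feeds
+// events to the low-level emitter. As a minimal illustration of the public
+// API built on top of it (not part of this file), decoding into a Node and
+// marshalling it back preserves comments via the head_comment/line_comment
+// event fields handled by the emitter:
+//
+//	var n Node
+//	_ = Unmarshal([]byte("# head\nkey: value # inline\n"), &n)
+//	out, _ := Marshal(&n)
+//	// out retains both comments in the round-tripped document.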
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + 
+		sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+	for _, num := range index {
+		for {
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					return reflect.Value{}
+				}
+				v = v.Elem()
+				continue
+			}
+			break
+		}
+		v = v.Field(num)
+	}
+	return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = e.fieldByIndex(in, info.Inline)
+				if !value.IsValid() {
+					continue
+				}
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output is valid for YAML 1.1
+// parsing.
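+//
+// For example, Marshal("on") emits the quoted scalar "on" rather than a
+// plain scalar that a YAML 1.1 parser would read as the boolean true.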
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. + implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. 
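+	// A Node zero value (no Kind, no Value, no Content) is therefore
+	// encoded as a plain "null" scalar.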
+ if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + + // If the tag was not explicitly requested, and dropping it won't change the + // implicit tag of the value, don't include it in the presentation. + var tag = node.Tag + var stag = shortTag(tag) + var forceQuoting bool + if tag != "" && node.Style&TaggedStyle == 0 { + if node.Kind == ScalarNode { + if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { + tag = "" + } else { + rtag, _ := resolve("", node.Value) + if rtag == stag { + tag = "" + } else if stag == strTag { + tag = "" + forceQuoting = true + } + } + } else { + var rtag string + switch node.Kind { + case MappingNode: + rtag = mapTag + case SequenceNode: + rtag = seqTag + } + if rtag == stag { + tag = "" + } + } + } + + switch node.Kind { + case DocumentNode: + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + yaml_document_end_event_initialize(&e.event, true) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case SequenceNode: + style := yaml_BLOCK_SEQUENCE_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case MappingNode: + style := yaml_BLOCK_MAPPING_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) + e.event.tail_comment = []byte(tail) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + + // The tail logic below moves the foot comment of prior keys to the following key, + // since the value for each key may be a nested structure and the foot needs to be + // processed only the entirety of the value is streamed. The last tail is processed + // with the mapping end event. + var tail string + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + foot := k.FootComment + if foot != "" { + kopy := *k + kopy.FootComment = "" + k = &kopy + } + e.node(k, tail) + tail = foot + + v := node.Content[i+1] + e.node(v, "") + } + + yaml_mapping_end_event_initialize(&e.event) + e.event.tail_comment = []byte(tail) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case AliasNode: + yaml_alias_event_initialize(&e.event, []byte(node.Value)) + e.event.head_comment = []byte(node.HeadComment) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case ScalarNode: + value := node.Value + if !utf8.ValidString(value) { + if stag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. 
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/openshift/vendor/go.yaml.in/yaml/v3/parserc.go b/openshift/vendor/go.yaml.in/yaml/v3/parserc.go new file mode 100644 index 0000000000..25fe823637 --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/parserc.go @@ -0,0 +1,1274 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? 
+// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
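+//
+// The parser is an explicit state machine: productions that nest (documents,
+// block and flow collections) append the state to resume to onto
+// parser.states and pop it once the nested node has been produced, so
+// nesting depth is tracked on an explicit stack rather than via recursion.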
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
+// explicit_document* STREAM-END
+//
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		var head_comment []byte
+		if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			// the header so the part before the last empty line goes into the
+			// document header, while the bottom of it goes into a follow up event.
+			for i := len(parser.head_comment) - 1; i > 0; i-- {
+				if parser.head_comment[i] == '\n' {
+					if i == len(parser.head_comment)-1 {
+						head_comment = parser.head_comment[:i]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					} else if parser.head_comment[i-1] == '\n' {
+						head_comment = parser.head_comment[:i-1]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					}
+				}
+			}
+		}
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+
+			head_comment: head_comment,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +// *********** +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// +// block_node ::= ALIAS +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// +// flow_node ::= ALIAS +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// +// ************************* +// +// block_content ::= block_collection | flow_collection | SCALAR +// +// ****** +// +// flow_content ::= flow_collection | SCALAR +// +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: 
false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// +// ******************** *********** * ********* +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. 
+// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// +// *** * +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// ***** * +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// - *** * +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - ***** * +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
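+//
+// For example, after the directive
+//
+//	%TAG !e! tag:example.com,2000:app/
+//
+// a node tagged !e!foo resolves to tag:example.com,2000:app/foo. The
+// default directives above make !!str resolve to tag:yaml.org,2002:str
+// even when no %TAG directive appears in the document.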
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/openshift/vendor/go.yaml.in/yaml/v3/readerc.go b/openshift/vendor/go.yaml.in/yaml/v3/readerc.go new file mode 100644 index 0000000000..56af245366 --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. 
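To make the encoding detection above concrete: `yaml_parser_determine_encoding` looks only at the leading BOM bytes, and the reader then transcodes UTF-16 input to UTF-8 internally, so callers never see the difference. A sketch, assuming as before the vendored `go.yaml.in/yaml/v3` import path:

```go
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	// "a: 1" encoded as UTF-16LE with a 0xFF 0xFE BOM, which
	// yaml_parser_determine_encoding recognizes via bom_UTF16LE.
	utf16le := []byte{0xFF, 0xFE, 'a', 0, ':', 0, ' ', 0, '1', 0}
	var m map[string]int
	if err := yaml.Unmarshal(utf16le, &m); err != nil {
		panic(err)
	}
	fmt.Println(m) // expected: map[a:1]
}
```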
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. 
+ if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. 
+ if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/openshift/vendor/go.yaml.in/yaml/v3/resolve.go b/openshift/vendor/go.yaml.in/yaml/v3/resolve.go new file mode 100644 index 0000000000..64ae888057 --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
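The reader's decode loop above (readerc.go) applies the RFC 2781 surrogate formulas quoted in its comments and then re-encodes each rune as UTF-8 into parser.buffer. The same arithmetic in isolation, as a self-contained check:

```go
package main

import "fmt"

func main() {
	// U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF), per RFC 2781.
	w1, w2 := rune(0xD83D), rune(0xDE00) // high and low surrogates of U+1F600
	u := 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)
	fmt.Printf("U+%X %c\n", u, u) // U+1F600 and the character it denotes

	// The reader's 4-byte UTF-8 branch, byte for byte.
	b := []byte{
		byte(0xF0 + (u >> 18)),
		byte(0x80 + ((u >> 12) & 0x3F)),
		byte(0x80 + ((u >> 6) & 0x3F)),
		byte(0x80 + (u & 0x3F)),
	}
	fmt.Println(string(b) == string(u)) // true
}
```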
+ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. 
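(The body of `resolve` continues below.) For reviewers, a behavioral sketch of the table-plus-hint machinery above, again assuming the vendored `go.yaml.in/yaml/v3` import path; the expected types noted in the comments are assumptions about the output, not assertions from this diff:

```go
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	// Booleans and null come from resolveMap; the numbers go through the
	// 'D'/'S' hint branches; the unquoted date goes through parseTimestamp.
	doc := `[true, ~, 42, 0x2A, 0o52, 6.02e23, .inf, 2001-12-14]`
	var v []interface{}
	if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
		panic(err)
	}
	for _, x := range v {
		fmt.Printf("%T: %v\n", x, x)
	}
	// Expected: bool, <nil>, three ints (42 each), two float64s, one time.Time.
}
```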
+ + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. + if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. 
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
diff --git a/openshift/vendor/go.yaml.in/yaml/v3/scannerc.go b/openshift/vendor/go.yaml.in/yaml/v3/scannerc.go
new file mode 100644
index 0000000000..30b1f08920
--- /dev/null
+++ b/openshift/vendor/go.yaml.in/yaml/v3/scannerc.go
@@ -0,0 +1,3040 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or,
+// as it is usually called, an LL(1) parser).
+//
+// Actually, there are two issues in Scanning that might be called "clever";
+// the rest is quite straightforward. The issues are "block collection start"
+// and "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntax peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1.
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. 
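`yaml_parser_scan` drains the token queue one token at a time, and DOCUMENT-START tokens are what let a single stream carry several documents, as in the overview examples above. A sketch using the public streaming API, under the same `go.yaml.in/yaml/v3` import assumption:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	// Three documents in one stream, mirroring the token overview above.
	stream := []byte("'a scalar'\n---\n'another scalar'\n---\n'yet another scalar'\n")
	dec := yaml.NewDecoder(bytes.NewReader(stream))
	for {
		var s string
		if err := dec.Decode(&s); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(s)
	}
}
```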
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
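The two-token lookahead in `yaml_parser_fetch_more_tokens` above exists so that comments can be associated with the right tokens. Exactly which node ends up carrying a given comment is a detail of that association logic, so the sketch below prints both candidates instead of asserting one (same import assumption as before):

```go
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

const doc = `# head comment
key: value # line comment
`

func main() {
	var n yaml.Node
	if err := yaml.Unmarshal([]byte(doc), &n); err != nil {
		panic(err)
	}
	mapping := n.Content[0] // the document node wraps the mapping
	k, v := mapping.Content[0], mapping.Content[1]
	fmt.Printf("key:   head=%q line=%q\n", k.HeadComment, k.LineComment)
	fmt.Printf("value: head=%q line=%q\n", v.HeadComment, v.LineComment)
}
```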
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
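Note how the block-entry check above requires a blank after the '-': that one character of lookahead is the difference between a sequence entry and a plain scalar that merely starts with a dash. A quick sketch (same import assumption):

```go
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	var a, b interface{}

	// '-' followed by a blank scans as a BLOCK-ENTRY indicator.
	if err := yaml.Unmarshal([]byte("- foo"), &a); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", a, a) // expected: []interface {} [foo]

	// No blank after '-', so this scans as a plain scalar.
	if err := yaml.Unmarshal([]byte("-foo"), &b); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", b, b) // expected: string -foo
}
```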
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
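The single-line restriction quoted above is observable from the outside: once a would-be implicit key spans a second line, the saved simple key is invalidated and a later ':' is rejected. A sketch (same import assumption; the exact error wording is not asserted here):

```go
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	var v map[string]string

	// A single-line implicit key is fine.
	fmt.Println(yaml.Unmarshal([]byte("ok: value\n"), &v)) // <nil>

	// "not\nok" spans two lines, so it can no longer serve as a simple key
	// by the time the ':' is scanned, and parsing fails.
	fmt.Println(yaml.Unmarshal([]byte("not\nok: value\n"), &v) != nil) // true
}
```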
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. 
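(`yaml_parser_roll_indent` continues below.) The `max_flow_level` guard above caps flow nesting at 10000 so that hostile inputs cannot grow the simple-key stack without bound, and `max_indents` plays the same role for block indentation. A sketch of the cap in action (same import assumption):

```go
package main

import (
	"fmt"
	"strings"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	// 10001 nested flow sequences exceed max_flow_level, so scanning fails
	// instead of recursing indefinitely.
	depth := 10001
	doc := strings.Repeat("[", depth) + strings.Repeat("]", depth)
	var v interface{}
	err := yaml.Unmarshal([]byte(doc), &v)
	fmt.Println(err != nil) // true
}
```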
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
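+	// (Illustrative example: in "{a: 1, b: 2}" each ',' becomes one
+	// FLOW-ENTRY token, and because simple keys were re-allowed above,
+	// "b" may start a new key right after the comma.)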
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report about it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
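+		// (Illustrative example: for a document beginning with "a: 1",
+		// the roll below inserts BLOCK-MAPPING-START ahead of the KEY
+		// token that was just queued for "a".)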
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. 
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	scan_mark := parser.mark
+
+	// Loop until the next token is found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		// - in the flow context
+		// - in the block context, but not at the beginning of the line or
+		// after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check if we just had a line comment under a sequence entry that
+		// looks more like a header to the following content. Similar to this:
+		//
+		// - # The comment
+		//   - Some data
+		//
+		// If so, transform the line comment to a head comment and reposition.
+		if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+			tokenA := parser.tokens[len(parser.tokens)-2]
+			tokenB := parser.tokens[len(parser.tokens)-1]
+			comment := &parser.comments[len(parser.comments)-1]
+			if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+				// If it was in the prior line, reposition so it becomes a
+				// header of the follow-up token. Otherwise, keep it in place
+				// so it becomes a header of the former.
+				comment.head = comment.line
+				comment.line = nil
+				if comment.start_mark.line == parser.mark.line-1 {
+					comment.token_mark = parser.mark
+				}
+			}
+		}
+
+		// Eat a comment until a line break.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			if !yaml_parser_scan_comments(parser, scan_mark) {
+				return false
+			}
+		}
+
+		// If it is a line break, eat it.
+		if is_break(parser.buffer, parser.buffer_pos) {
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+
+			// In the block context, a new line may start a simple key.
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			break // We have found a token.
+		}
+	}
+
+	return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//
+//	%YAML 1.1 # a comment \n
+//	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//	%TAG !yaml! tag:yaml.org,2002: \n
+//	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
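+	// (Illustrative example: "%YAML 1.1" yields a VERSION-DIRECTIVE
+	// token with major=1 and minor=1, while "%TAG !e! tag:example.com,2000:"
+	// yields a TAG-DIRECTIVE token with handle "!e!" and that prefix.)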
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		// [Go] Discard this inline comment for the time being.
+		//if !yaml_parser_scan_line_comment(parser, start_mark) {
+		//	return false
+		//}
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//
+//	%YAML 1.1 # a comment \n
+//	 ^^^^
+//	%TAG !yaml! tag:yaml.org,2002: \n
+//	 ^^^
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//
+//	%YAML 1.1 # a comment \n
+//	      ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
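+	// (Illustrative example: by this point "!<tag:yaml.org,2002:str>" has
+	// taken the verbatim branch with an empty handle, "!!str" has split
+	// into handle "!!" and suffix "str", "!foo" has kept handle "!" with
+	// suffix "foo", and a lone "!" has become handle "" with suffix "!".)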
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
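+		// (Illustrative example: "|2-" and "|-2" are equivalent block
+		// scalar headers, an indent increment of 2 with strip chomping;
+		// this branch parses the digit-first spelling.)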
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. 
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. 
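+		// (Illustrative example: in 'it''s' the doubled quote below
+		// collapses to a single "'", and in "a\tb" the backslash escape
+		// is decoded; both cases are handled by this loop.)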
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+ s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. 
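+			// (Illustrative example: in "a: b\n   c" the break between
+			// "b" and "c" folds into one space, so the plain scalar
+			// value becomes "b c".)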
+ if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
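+	// (Illustrative example: after a plain scalar that spanned a line
+	// break, the scanner is at a fresh line, so a following token may
+	// again start a simple key.)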
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line - parser.newlines + 1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/openshift/vendor/go.yaml.in/yaml/v3/sorter.go b/openshift/vendor/go.yaml.in/yaml/v3/sorter.go new file mode 100644 index 0000000000..9210ece7e9 --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
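+//
+// (Illustrative example: the keyList ordering above sorts "item2" before
+// "item10", because the embedded digit runs are compared by numeric value
+// rather than lexically.)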
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/openshift/vendor/go.yaml.in/yaml/v3/writerc.go b/openshift/vendor/go.yaml.in/yaml/v3/writerc.go new file mode 100644 index 0000000000..266d0b092c --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/openshift/vendor/go.yaml.in/yaml/v3/yaml.go b/openshift/vendor/go.yaml.in/yaml/v3/yaml.go new file mode 100644 index 0000000000..0b101cd20d --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/yaml.go @@ -0,0 +1,703 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+//
+// Source code and other details for the project are available at GitHub:
+//
+//	https://github.com/yaml/go-yaml
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+	UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	var t T
+//	yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
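+//
+// A minimal usage sketch for a multi-document stream (values are
+// illustrative; Decode returns io.EOF once the stream is exhausted):
+//
+//	dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
+//	for {
+//		var m map[string]int
+//		if err := dec.Decode(&m); err == io.EOF {
+//			break
+//		} else if err != nil {
+//			log.Fatal(err)
+//		}
+//		fmt.Println(m["a"]) // prints 1, then 2
+//	}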
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//	`(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//	omitempty    Only include the field if it's not set to the zero
+//	             value for the type or to empty slices or maps.
+//	             Zero valued structs will be omitted if all their public
+//	             fields are zero, unless they implement an IsZero
+//	             method (see the IsZeroer interface type), in which
+//	             case the field will be excluded if IsZero returns true.
+//
+//	flow         Marshal using a flow style (useful for structs,
+//	             sequences and maps).
+//
+//	inline       Inline the field, which must be a struct or a map,
+//	             causing all of its fields or keys to be processed as if
+//	             they were part of the outer struct. For maps, keys must
+//	             not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//	yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
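+//
+// A short usage sketch (illustrative values):
+//
+//	var buf bytes.Buffer
+//	enc := yaml.NewEncoder(&buf)
+//	if err := enc.Encode(map[string]int{"a": 1}); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := enc.Close(); err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Print(buf.String()) // "a: 1\n"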
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(v))
+	e.finish()
+	p := newParser(e.out)
+	p.textless = true
+	defer p.destroy()
+	doc := p.parse()
+	*n = *doc.Content[0]
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
+func (e *Encoder) CompactSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = true
+}
+
+// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
+func (e *Encoder) DefaultSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = false
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//	var person struct {
+//		Name    string
+//		Address yaml.Node
+//	}
+//	err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//	var person Node
+//	err := yaml.Unmarshal(data, &person)
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line where the node is.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates the data type
+// for the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		case 0:
+			// Special case to make the zero value convenient.
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + 
inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/openshift/vendor/go.yaml.in/yaml/v3/yamlh.go b/openshift/vendor/go.yaml.in/yaml/v3/yamlh.go new file mode 100644 index 0000000000..f59aa40f64 --- /dev/null +++ b/openshift/vendor/go.yaml.in/yaml/v3/yamlh.go @@ -0,0 +1,811 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial 
portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. 
+	yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT // A STREAM-START event.
+	yaml_STREAM_END_EVENT   // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// +// yaml_parser_set_input(). +// +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. 
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_reader io.Reader // File input data.
+	input        []byte    // String input data.
+	input_pos    int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	newlines int // The number of line breaks since last non-break/non-blank character
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+ + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// +// yaml_emitter_set_output(). +// +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+	yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE   // Expect the next item of a flow sequence, with the comma already written out
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE     // Expect the next key of a flow mapping, with the comma already written out
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_writer io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // If the output is in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	compact_sequence_indent bool // Is '- ' considered part of the indentation for sequence elements?
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	space_above bool // Is there an empty line above?
+	foot_indent int  // The indent used to write the foot comment above, or -1 if none.
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Comments
+	head_comment []byte
+	line_comment []byte
+	foot_comment []byte
+	tail_comment []byte
+
+	key_line_comment []byte
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/openshift/vendor/go.yaml.in/yaml/v3/yamlprivateh.go b/openshift/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
new file mode 100644
index 0000000000..dea1ba9610
--- /dev/null
+++ b/openshift/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. 
+func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/openshift/vendor/golang.org/x/mod/LICENSE b/openshift/vendor/golang.org/x/mod/LICENSE new file mode 100644 index 0000000000..2a7cf70da6 --- /dev/null +++ b/openshift/vendor/golang.org/x/mod/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/openshift/vendor/golang.org/x/mod/PATENTS b/openshift/vendor/golang.org/x/mod/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/openshift/vendor/golang.org/x/mod/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/openshift/vendor/golang.org/x/mod/semver/semver.go b/openshift/vendor/golang.org/x/mod/semver/semver.go new file mode 100644 index 0000000000..628f8fd687 --- /dev/null +++ b/openshift/vendor/golang.org/x/mod/semver/semver.go @@ -0,0 +1,407 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +import ( + "slices" + "strings" +) + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". 
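// (Editor's aside, not part of the vendored file: a hedged sketch of typical
// use of this package, following the package doc above. Note the mandatory
// "v" prefix.
//
//	semver.IsValid("v1.2.3")                 // true
//	semver.IsValid("1.2.3")                  // false: no "v" prefix
//	semver.Canonical("v1.2")                 // "v1.2.0"
//	semver.Compare("v1.0.0-alpha", "v1.0.0") // -1: prereleases sort first
// )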
+// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. +func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +// +// Deprecated: use [Compare] instead. In most cases, returning a canonicalized +// version is not expected or desired. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +// ByVersion implements [sort.Interface] for sorting semantic version strings. +type ByVersion []string + +func (vs ByVersion) Len() int { return len(vs) } +func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs ByVersion) Less(i, j int) bool { return compareVersion(vs[i], vs[j]) < 0 } + +// Sort sorts a list of semantic version strings using [Compare] and falls back +// to use [strings.Compare] if both versions are considered equal. +func Sort(list []string) { + slices.SortFunc(list, compareVersion) +} + +func compareVersion(a, b string) int { + cmp := Compare(a, b) + if cmp != 0 { + return cmp + } + return strings.Compare(a, b) +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' 
{ + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + return + } + } + if v != "" { + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . 
+ var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/openshift/vendor/golang.org/x/net/http2/frame.go b/openshift/vendor/golang.org/x/net/http2/frame.go index 97bd8b06f7..db3264da8c 100644 --- a/openshift/vendor/golang.org/x/net/http2/frame.go +++ b/openshift/vendor/golang.org/x/net/http2/frame.go @@ -39,7 +39,7 @@ const ( FrameContinuation FrameType = 0x9 ) -var frameName = map[FrameType]string{ +var frameNames = [...]string{ FrameData: "DATA", FrameHeaders: "HEADERS", FramePriority: "PRIORITY", @@ -53,10 +53,10 @@ var frameName = map[FrameType]string{ } func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s + if int(t) < len(frameNames) { + return frameNames[t] } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t) } // Flags is a bitmask of HTTP/2 flags. @@ -124,7 +124,7 @@ var flagName = map[FrameType]map[Flags]string{ // might be 0). type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) -var frameParsers = map[FrameType]frameParser{ +var frameParsers = [...]frameParser{ FrameData: parseDataFrame, FrameHeaders: parseHeadersFrame, FramePriority: parsePriorityFrame, @@ -138,8 +138,8 @@ var frameParsers = map[FrameType]frameParser{ } func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f + if int(t) < len(frameParsers) { + return frameParsers[t] } return parseUnknownFrame } @@ -509,7 +509,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { } if fh.Length > fr.maxReadSize { if fh == invalidHTTP1LookingFrameHeader() { - return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) } return nil, ErrFrameTooLarge } diff --git a/openshift/vendor/golang.org/x/net/http2/http2.go b/openshift/vendor/golang.org/x/net/http2/http2.go index 6c18ea230b..ea5ae629fd 100644 --- a/openshift/vendor/golang.org/x/net/http2/http2.go +++ b/openshift/vendor/golang.org/x/net/http2/http2.go @@ -11,8 +11,6 @@ // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. package http2 // import "golang.org/x/net/http2" import ( diff --git a/openshift/vendor/golang.org/x/oauth2/internal/doc.go b/openshift/vendor/golang.org/x/oauth2/internal/doc.go index 03265e888a..8c7c475f2d 100644 --- a/openshift/vendor/golang.org/x/oauth2/internal/doc.go +++ b/openshift/vendor/golang.org/x/oauth2/internal/doc.go @@ -2,5 +2,5 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. +// Package internal contains support packages for [golang.org/x/oauth2]. 
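// (Editor's aside: the bracketed references introduced throughout these
// oauth2 hunks, such as [context.WithValue] and [golang.org/x/oauth2.Token],
// are Go doc links, supported since Go 1.19; tools such as pkg.go.dev render
// them as cross-references to the named identifier. A minimal, hypothetical
// example:
//
//	// Fetch is like [net/http.Get] but retries transient failures.
//	func Fetch(url string) ([]byte, error)
// )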
package internal diff --git a/openshift/vendor/golang.org/x/oauth2/internal/oauth2.go b/openshift/vendor/golang.org/x/oauth2/internal/oauth2.go index 14989beaf4..71ea6ad1f5 100644 --- a/openshift/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/openshift/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -13,7 +13,7 @@ import ( ) // ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a +// to an [*rsa.PrivateKey]. It detects whether the private key is in a // PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. diff --git a/openshift/vendor/golang.org/x/oauth2/internal/token.go b/openshift/vendor/golang.org/x/oauth2/internal/token.go index e83ddeef0f..8389f24629 100644 --- a/openshift/vendor/golang.org/x/oauth2/internal/token.go +++ b/openshift/vendor/golang.org/x/oauth2/internal/token.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "mime" "net/http" @@ -26,9 +25,9 @@ import ( // the requests to access protected resources on the OAuth 2.0 // provider's backend. // -// This type is a mirror of oauth2.Token and exists to break +// This type is a mirror of [golang.org/x/oauth2.Token] and exists to break // an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. +// should convert this Token into an [golang.org/x/oauth2.Token] before use. type Token struct { // AccessToken is the token that authorizes and authenticates // the requests. @@ -50,9 +49,16 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // Raw optionally contains extra metadata from the server // when updating a token. - Raw interface{} + Raw any } // tokenJSON is the struct representing the HTTP response from OAuth2 @@ -99,14 +105,6 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { return nil } -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. -func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - // AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. type AuthStyle int @@ -143,6 +141,11 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { return c } +type authStyleCacheKey struct { + url string + clientID string +} + // AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that @@ -150,26 +153,26 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { // small. type AuthStyleCache struct { mu sync.Mutex - m map[string]AuthStyle // keyed by tokenURL + m map[authStyleCacheKey]AuthStyle } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. 
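// (Editor's aside on the hunks below: the probe cache used to be keyed by
// tokenURL alone, so two clients talking to the same token endpoint shared a
// single probed auth style, presumably even when the endpoint treated them
// differently. The change widens the key to (tokenURL, clientID); a lookup
// now reads, roughly:
//
//	style, ok := c.m[authStyleCacheKey{url: tokenURL, clientID: clientID}]
// )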
-func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { +func (c *AuthStyleCache) lookupAuthStyle(tokenURL, clientID string) (style AuthStyle, ok bool) { c.mu.Lock() defer c.mu.Unlock() - style, ok = c.m[tokenURL] + style, ok = c.m[authStyleCacheKey{tokenURL, clientID}] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { +func (c *AuthStyleCache) setAuthStyle(tokenURL, clientID string, v AuthStyle) { c.mu.Lock() defer c.mu.Unlock() if c.m == nil { - c.m = make(map[string]AuthStyle) + c.m = make(map[authStyleCacheKey]AuthStyle) } - c.m[tokenURL] = v + c.m[authStyleCacheKey{tokenURL, clientID}] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -210,9 +213,9 @@ func cloneURLValues(v url.Values) url.Values { } func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { - needsAuthStyleProbe := authStyle == 0 + needsAuthStyleProbe := authStyle == AuthStyleUnknown if needsAuthStyleProbe { - if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL, clientID); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -242,7 +245,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - styleCache.setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, clientID, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. @@ -257,7 +260,7 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, err } - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) r.Body.Close() if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) @@ -312,7 +315,8 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { TokenType: tj.TokenType, RefreshToken: tj.RefreshToken, Expiry: tj.expiry(), - Raw: make(map[string]interface{}), + ExpiresIn: int64(tj.ExpiresIn), + Raw: make(map[string]any), } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } diff --git a/openshift/vendor/golang.org/x/oauth2/internal/transport.go b/openshift/vendor/golang.org/x/oauth2/internal/transport.go index b9db01ddfd..afc0aeb274 100644 --- a/openshift/vendor/golang.org/x/oauth2/internal/transport.go +++ b/openshift/vendor/golang.org/x/oauth2/internal/transport.go @@ -9,8 +9,8 @@ import ( "net/http" ) -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate an [*http.Client] value with a context. var HTTPClient ContextKey // ContextKey is just an empty struct. It exists so HTTPClient can be diff --git a/openshift/vendor/golang.org/x/oauth2/oauth2.go b/openshift/vendor/golang.org/x/oauth2/oauth2.go index 09f6a49b80..de34feb844 100644 --- a/openshift/vendor/golang.org/x/oauth2/oauth2.go +++ b/openshift/vendor/golang.org/x/oauth2/oauth2.go @@ -22,9 +22,9 @@ import ( ) // NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). 
+// your own [context.Context]. // -// Deprecated: Use context.Background() or context.TODO() instead. +// Deprecated: Use [context.Background] or [context.TODO] instead. var NoContext = context.TODO() // RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. @@ -37,8 +37,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) {} // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. -// For the client credentials 2-legged OAuth2 flow, see the clientcredentials -// package (https://golang.org/x/oauth2/clientcredentials). +// For the client credentials 2-legged OAuth2 flow, see the +// [golang.org/x/oauth2/clientcredentials] package. type Config struct { // ClientID is the application's ID. ClientID string @@ -46,7 +46,7 @@ type Config struct { // ClientSecret is the application's secret. ClientSecret string - // Endpoint contains the resource server's token endpoint + // Endpoint contains the authorization server's token endpoint // URLs. These are constants specific to each server and are // often available via site-specific packages, such as // google.Endpoint or github.Endpoint. @@ -56,7 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. RedirectURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is @@ -135,7 +135,7 @@ type setParam struct{ k, v string } func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// SetAuthURLParam builds an [AuthCodeOption] which passes key/value parameters // to a provider's authorization endpoint. func SetAuthURLParam(key, value string) AuthCodeOption { return setParam{key, value} @@ -148,8 +148,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // request and callback. The authorization server includes this value when // redirecting the user agent back to the client. // -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. +// Opts may include [AccessTypeOnline] or [AccessTypeOffline], as well +// as [ApprovalForce]. // // To protect against CSRF attacks, opts should include a PKCE challenge // (S256ChallengeOption). Not all servers support PKCE. An alternative is to @@ -194,7 +194,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { // and when other authorization grant types are not available." // See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { v := url.Values{ "grant_type": {"password"}, @@ -212,10 +212,10 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // It is used after a resource provider redirects the user back // to the Redirect URI (the URL obtained from AuthCodeURL). // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. // -// The code will be in the *http.Request.FormValue("code"). 
Before -// calling Exchange, be sure to validate FormValue("state") if you are +// The code will be in the [http.Request.FormValue]("code"). Before +// calling Exchange, be sure to validate [http.Request.FormValue]("state") if you are // using it to protect against CSRF attacks. // // If using PKCE to protect against CSRF attacks, opts should include a @@ -242,10 +242,10 @@ func (c *Config) Client(ctx context.Context, t *Token) *http.Client { return NewClient(ctx, c.TokenSource(ctx, t)) } -// TokenSource returns a TokenSource that returns t until t expires, +// TokenSource returns a [TokenSource] that returns t until t expires, // automatically refreshing it as necessary using the provided context. // -// Most users will use Config.Client instead. +// Most users will use [Config.Client] instead. func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { tkr := &tokenRefresher{ ctx: ctx, @@ -260,7 +260,7 @@ func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { } } -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// tokenRefresher is a TokenSource that makes "grant_type=refresh_token" // HTTP requests to renew a token using a RefreshToken. type tokenRefresher struct { ctx context.Context // used to get HTTP requests @@ -288,7 +288,7 @@ func (tf *tokenRefresher) Token() (*Token, error) { if tf.refreshToken != tk.RefreshToken { tf.refreshToken = tk.RefreshToken } - return tk, err + return tk, nil } // reuseTokenSource is a TokenSource that holds a single token in memory @@ -305,8 +305,7 @@ type reuseTokenSource struct { } // Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. +// refresh the current token and return the new one. func (s *reuseTokenSource) Token() (*Token, error) { s.mu.Lock() defer s.mu.Unlock() @@ -322,7 +321,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { return t, nil } -// StaticTokenSource returns a TokenSource that always returns the same token. +// StaticTokenSource returns a [TokenSource] that always returns the same token. // Because the provided token t is never refreshed, StaticTokenSource is only // useful for tokens that never expire. func StaticTokenSource(t *Token) TokenSource { @@ -338,16 +337,16 @@ func (s staticTokenSource) Token() (*Token, error) { return s.t, nil } -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate a [*http.Client] value with a context. var HTTPClient internal.ContextKey -// NewClient creates an *http.Client from a Context and TokenSource. +// NewClient creates an [*http.Client] from a [context.Context] and [TokenSource]. // The returned client is not valid beyond the lifetime of the context. // -// Note that if a custom *http.Client is provided via the Context it +// Note that if a custom [*http.Client] is provided via the [context.Context] it // is used only for token acquisition and is not used to configure the -// *http.Client returned from NewClient. +// [*http.Client] returned from NewClient. // // As a special case, if src is nil, a non-OAuth2 client is returned // using the provided context. 
This exists to support related OAuth2 @@ -356,15 +355,19 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { return internal.ContextClient(ctx) } + cc := internal.ContextClient(ctx) return &http.Client{ Transport: &Transport{ - Base: internal.ContextClient(ctx).Transport, + Base: cc.Transport, Source: ReuseTokenSource(nil, src), }, + CheckRedirect: cc.CheckRedirect, + Jar: cc.Jar, + Timeout: cc.Timeout, } } -// ReuseTokenSource returns a TokenSource which repeatedly returns the +// ReuseTokenSource returns a [TokenSource] which repeatedly returns the // same token as long as it's valid, starting with t. // When its cached token is invalid, a new token is obtained from src. // @@ -372,10 +375,10 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { // (such as a file on disk) between runs of a program, rather than // obtaining new tokens unnecessarily. // -// The initial token t may be nil, in which case the TokenSource is +// The initial token t may be nil, in which case the [TokenSource] is // wrapped in a caching version if it isn't one already. This also // means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. +// [TokenSource] without adverse effects. func ReuseTokenSource(t *Token, src TokenSource) TokenSource { // Don't wrap a reuseTokenSource in itself. That would work, // but cause an unnecessary number of mutex operations. @@ -393,8 +396,8 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the -// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// ReuseTokenSourceWithExpiry returns a [TokenSource] that acts in the same manner as the +// [TokenSource] returned by [ReuseTokenSource], except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { diff --git a/openshift/vendor/golang.org/x/oauth2/pkce.go b/openshift/vendor/golang.org/x/oauth2/pkce.go index 50593b6dfe..cea8374d51 100644 --- a/openshift/vendor/golang.org/x/oauth2/pkce.go +++ b/openshift/vendor/golang.org/x/oauth2/pkce.go @@ -1,6 +1,7 @@ // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + package oauth2 import ( @@ -20,9 +21,9 @@ const ( // This follows recommendations in RFC 7636. // // A fresh verifier should be generated for each authorization. -// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange -// (or Config.DeviceAccessToken). +// The resulting verifier should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] +// with [S256ChallengeOption], and to [Config.Exchange] or [Config.DeviceAccessToken] +// with [VerifierOption]. func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be // used to create a 32-octet sequence. The octet sequence is then @@ -36,22 +37,22 @@ func GenerateVerifier() string { return base64.RawURLEncoding.EncodeToString(data) } -// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be -// passed to Config.Exchange or Config.DeviceAccessToken only. +// VerifierOption returns a PKCE code verifier [AuthCodeOption]. 
It should only be +// passed to [Config.Exchange] or [Config.DeviceAccessToken]. func VerifierOption(verifier string) AuthCodeOption { return setParam{k: codeVerifierKey, v: verifier} } // S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. // -// Prefer to use S256ChallengeOption where possible. +// Prefer to use [S256ChallengeOption] where possible. func S256ChallengeFromVerifier(verifier string) string { sha := sha256.Sum256([]byte(verifier)) return base64.RawURLEncoding.EncodeToString(sha[:]) } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] // only. func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/openshift/vendor/golang.org/x/oauth2/token.go b/openshift/vendor/golang.org/x/oauth2/token.go index 109997d77c..239ec32962 100644 --- a/openshift/vendor/golang.org/x/oauth2/token.go +++ b/openshift/vendor/golang.org/x/oauth2/token.go @@ -44,7 +44,7 @@ type Token struct { // Expiry is the optional expiration time of the access token. // - // If zero, TokenSource implementations will reuse the same + // If zero, [TokenSource] implementations will reuse the same // token forever and RefreshToken or equivalent // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` @@ -58,7 +58,7 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. - raw interface{} + raw any // expiryDelta is used to calculate when a token is considered // expired, by subtracting from Expiry. If zero, defaultExpiryDelta @@ -86,16 +86,16 @@ func (t *Token) Type() string { // SetAuthHeader sets the Authorization header to r using the access // token in t. // -// This method is unnecessary when using Transport or an HTTP Client +// This method is unnecessary when using [Transport] or an HTTP Client // returned by this package. func (t *Token) SetAuthHeader(r *http.Request) { r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) } -// WithExtra returns a new Token that's a clone of t, but using the +// WithExtra returns a new [Token] that's a clone of t, but using the // provided raw extra map. This is only intended for use by packages // implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { +func (t *Token) WithExtra(extra any) *Token { t2 := new(Token) *t2 = *t t2.raw = extra @@ -105,8 +105,8 @@ func (t *Token) WithExtra(extra interface{}) *Token { // Extra returns an extra field. // Extra fields are key-value pairs returned by the server as a // part of the token retrieval response. -func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { +func (t *Token) Extra(key string) any { + if raw, ok := t.raw.(map[string]any); ok { return raw[key] } @@ -163,13 +163,14 @@ func tokenFromInternal(t *internal.Token) *Token { TokenType: t.TokenType, RefreshToken: t.RefreshToken, Expiry: t.Expiry, + ExpiresIn: t.ExpiresIn, raw: t.Raw, } } // retrieveToken takes a *Config and uses that to retrieve an *internal.Token. // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. +// with an error. 
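// (Editor's aside, not part of the vendored file: a hedged sketch of the PKCE
// flow that the pkce.go hunk above documents. conf is an assumed
// *oauth2.Config, and code arrives on the redirect callback.
//
//	verifier := oauth2.GenerateVerifier()
//	url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
//	// ...send the user to url, then receive ?code=... on the callback...
//	tok, err := conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))
// )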
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { diff --git a/openshift/vendor/golang.org/x/oauth2/transport.go b/openshift/vendor/golang.org/x/oauth2/transport.go index 90657915fb..8bbebbac9e 100644 --- a/openshift/vendor/golang.org/x/oauth2/transport.go +++ b/openshift/vendor/golang.org/x/oauth2/transport.go @@ -11,12 +11,12 @@ import ( "sync" ) -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. +// Transport is an [http.RoundTripper] that makes OAuth 2.0 HTTP requests, +// wrapping a base [http.RoundTripper] and adding an Authorization header +// with a token from the supplied [TokenSource]. // // Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. +// higher-level [Config.Client] method instead. type Transport struct { // Source supplies the token to add to outgoing requests' // Authorization headers. @@ -47,7 +47,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { return nil, err } - req2 := cloneRequest(req) // per RoundTripper contract + req2 := req.Clone(req.Context()) token.SetAuthHeader(req2) // req.Body is assumed to be closed by the base RoundTripper. @@ -73,17 +73,3 @@ func (t *Transport) base() http.RoundTripper { } return http.DefaultTransport } - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} diff --git a/openshift/vendor/golang.org/x/sync/LICENSE b/openshift/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 0000000000..2a7cf70da6 --- /dev/null +++ b/openshift/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/openshift/vendor/golang.org/x/sync/PATENTS b/openshift/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/openshift/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/openshift/vendor/golang.org/x/sync/errgroup/errgroup.go b/openshift/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 0000000000..1d8cffae8c --- /dev/null +++ b/openshift/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,151 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. +package errgroup + +import ( + "context" + "fmt" + "sync" +) + +type token struct{} + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. A Group should not be reused for different tasks. +// +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. +type Group struct { + cancel func(error) + + wg sync.WaitGroup + + sem chan token + + errOnce sync.Once + err error +} + +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + +// WithContext returns a new Group and an associated Context derived from ctx. 
+// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancelCause(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel(g.err) + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to Go must happen before a Wait. +// It blocks until the new goroutine can be added without the number of +// goroutines in the group exceeding the configured limit. +// +// The first goroutine in the group that returns a non-nil error will +// cancel the associated Context, if any. The error will be returned +// by Wait. +func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + + g.wg.Add(1) + go func() { + defer g.done() + + // It is tempting to propagate panics from f() + // up to the goroutine that calls Wait, but + // it creates more problems than it solves: + // - it delays panics arbitrarily, + // making bugs harder to detect; + // - it turns f's panic stack into a mere value, + // hiding it from crash-monitoring tools; + // - it risks deadlocks that hide the panic entirely, + // if f's panic leaves the program in a state + // that prevents the Wait call from being reached. + // See #53757, #74275, #74304, #74306. + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. 
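// (Editor's aside, not part of the vendored file: a hedged sketch of the
// Group API documented above. urls and fetch are assumed helpers.
//
//	g, ctx := errgroup.WithContext(context.Background())
//	g.SetLimit(4) // at most four fetches in flight
//	for _, u := range urls {
//		u := u // pin the loop variable (unneeded on Go 1.22+)
//		g.Go(func() error { return fetch(ctx, u) })
//	}
//	if err := g.Wait(); err != nil {
//		log.Fatal(err) // first non-nil error returned from any subtask
//	}
// )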
+func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) +} diff --git a/openshift/vendor/golang.org/x/sys/unix/mkerrors.sh b/openshift/vendor/golang.org/x/sys/unix/mkerrors.sh index 6ab02b6c31..d1c8b2640e 100644 --- a/openshift/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/openshift/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -349,6 +349,9 @@ struct ltchars { #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) +// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info") +#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME +#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION ' includes_NetBSD=' diff --git a/openshift/vendor/golang.org/x/sys/unix/syscall_darwin.go b/openshift/vendor/golang.org/x/sys/unix/syscall_darwin.go index 798f61ad3b..7838ca5db2 100644 --- a/openshift/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/openshift/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } -// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) const minIovec = 8 func Readv(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) @@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) { } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = preadv(fd, iovecs, offset) @@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } } -func darwinMajorMinPatch() (maj, min, patch int, err error) { - var un Utsname - err = Uname(&un) - if err != nil { - return - } - - var mmp [3]int - c := 0 -Loop: - for _, b := range un.Release[:] { - switch { - case b >= '0' && b <= '9': - mmp[c] = 10*mmp[c] + int(b-'0') - case b == '.': - c++ - if c > 2 { - return 0, 0, 0, ENOTSUP - } - case b == 0: - break Loop - default: - return 0, 0, 0, ENOTSUP - } - } - if c != 2 { - return 0, 0, 0, ENOTSUP - } - return mmp[0], mmp[1], mmp[2], nil -} - -func darwinKernelVersionMin(maj, min, patch int) bool { - actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch() - if err != nil { - return false - } - return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch) -} - +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n 
*uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux.go index 4f432bfe8f..b6db27d937 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -319,6 +319,7 @@ const ( AUDIT_INTEGRITY_POLICY_RULE = 0x70f AUDIT_INTEGRITY_RULE = 0x70d AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_INTEGRITY_USERSPACE = 0x710 AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f AUDIT_IPE_ACCESS = 0x58c @@ -327,6 +328,8 @@ const ( AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -491,6 +494,7 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 @@ -527,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -554,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -843,9 +849,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2023-03-01)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x30 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -936,11 +942,10 @@ const ( EPOLL_CTL_MOD = 0x3 EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 + ETHTOOL_FAMILY_NAME = "ethtool" + ETHTOOL_FAMILY_VERSION = 0x1 ETHTOOL_FEC_AUTO = 0x2 ETHTOOL_FEC_BASER = 0x10 ETHTOOL_FEC_LLRS = 0x20 @@ -1203,13 +1208,18 @@ const ( FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 FAN_EPIDFD = -0x2 + FAN_ERRNO_BITS = 0x8 + FAN_ERRNO_MASK = 0xff + FAN_ERRNO_SHIFT = 0x18 FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 + FAN_EVENT_INFO_TYPE_RANGE = 0x6 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 @@ -1224,9 +1234,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1240,6 +1253,7 @@ const ( FAN_OPEN_EXEC = 0x1000 FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 + FAN_PRE_ACCESS = 0x100000 FAN_Q_OVERFLOW = 0x4000 FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 @@ -1247,6 +1261,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1266,6 +1281,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 
0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1574,7 +1590,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 @@ -1625,7 +1640,6 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -1687,7 +1701,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1809,7 +1822,11 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 @@ -2485,6 +2502,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 @@ -2644,6 +2665,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2724,6 +2749,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2787,7 +2813,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e + RTA_MAX = 0x1f RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -2864,10 +2890,12 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELANYCAST = 0x3d RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELLINKPROP = 0x6d RTM_DELMDB = 0x55 + RTM_DELMULTICAST = 0x39 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 RTM_DELNEXTHOP = 0x69 @@ -2917,11 +2945,13 @@ const ( RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWANYCAST = 0x3c RTM_NEWCACHEREPORT = 0x60 RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWLINKPROP = 0x6c RTM_NEWMDB = 0x54 + RTM_NEWMULTICAST = 0x38 RTM_NEWNDUSEROPT = 0x44 RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 @@ -2970,6 +3000,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -2987,11 +3018,12 @@ const ( RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 RWF_ATOMIC = 0x40 + RWF_DONTCACHE = 0x80 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x7f + RWF_SUPPORTED = 0xff RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3271,6 +3303,7 @@ const ( STATX_BTIME = 0x800 STATX_CTIME = 0x80 STATX_DIOALIGN = 0x2000 + STATX_DIO_READ_ALIGN = 0x20000 STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 @@ -3322,7 +3355,7 @@ const ( TASKSTATS_GENL_NAME = 
"TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xe + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3392,8 +3425,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3503,6 +3534,7 @@ const ( TP_STATUS_WRONG_FORMAT = 0x4 TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 + UBI_IOCECNFO = 0xc01c6f06 UDF_SUPER_MAGIC = 0x15013346 UDP_CORK = 0x1 UDP_ENCAP = 0x64 @@ -3515,8 +3547,6 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff @@ -3559,7 +3589,7 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3673,6 +3703,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 75207613c7..1c37f9fbc4 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -360,6 +361,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -372,6 +374,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c68acda535..6f54d34aef 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -361,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -373,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a8c607ab86..783ec5c126 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -366,6 +367,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -378,6 +380,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO 
= 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 18563dd8d3..ca83d3ba16 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 22912cdaa9..607e611c0c 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -353,6 +354,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -365,6 +367,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 29344eb37a..b9cb5bd3c0 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 20d51fb96a..65b078a638 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 321b60902a..5298a3033d 100644 --- 
a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 9bacdf1e27..7bc557c876 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c224272615..152399bb04 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -414,6 +415,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -426,6 +428,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 6270c8ee13..1a1ce2409c 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -430,6 +432,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 9966c1941f..4231a1fb57 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT 
= 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -430,6 +432,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 848e5fcc42..21c0e95266 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -350,6 +351,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -362,6 +364,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 669b2adb80..f00d1cd7cf 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -422,6 +423,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -434,6 +436,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 4834e57514..bc8d539e6a 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -461,6 +462,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS = 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 @@ -473,6 +475,7 @@ const ( SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 SO_RCVMARK = 0x54 + SO_RCVPRIORITY = 0x5b SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff306..aca56ee494 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -462,4 +462,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 5eb450695e..2ea1ef58c3 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ 
b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -385,4 +385,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e5029744..d22c8af319 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -426,4 +426,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec51b..5ee264ae97 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -329,4 +329,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a18..f9f03ebf5f 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -325,4 +325,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a336b..87c2118e84 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b9962278..391ad102fb 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9e6..5656157757 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc22..0482b52e3c 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 0d777bfbb1..71806f08f3 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ 
b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -453,4 +453,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b446365025..e35a710582 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c188..2aea476705 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 8405391698..6c9bb4e560 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -330,4 +330,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790d6..680bc9915a 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -391,4 +391,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f9d..620f271052 100644 --- a/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/openshift/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -404,4 +404,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux.go index a46abe6472..cd236443f6 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -114,8 +114,10 @@ type Statx_t struct { Atomic_write_unit_min uint32 Atomic_write_unit_max uint32 Atomic_write_segments_max uint32 + Dio_read_offset_align uint32 + Atomic_write_unit_max_opt uint32 _ [1]uint32 - _ [9]uint64 + _ [8]uint64 } type Fsid struct { @@ -199,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 + _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -2226,8 +2229,11 @@ const ( NFT_PAYLOAD_LL_HEADER = 0x0 NFT_PAYLOAD_NETWORK_HEADER = 0x1 NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_INNER_HEADER = 0x3 + NFT_PAYLOAD_TUN_HEADER = 0x4 NFT_PAYLOAD_CSUM_NONE = 0x0 NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_CSUM_SCTP = 0x2 NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 NFTA_PAYLOAD_UNSPEC = 0x0 NFTA_PAYLOAD_DREG = 0x1 @@ -2314,6 +2320,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 
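Every architecture table above picks up a number for the new open_tree_attr(2) syscall (467, or 4467/5467 on the MIPS ABIs). This update only defines the constant; no typed wrapper is generated, so a caller would have to go through unix.Syscall6 directly. The sketch below is a hypothetical illustration of that, reusing unix.MountAttr from the existing mount_setattr(2) support; the path and flag choices are made up for the example and the raw-syscall argument order follows the man page rather than anything in this diff.

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	// open_tree_attr(dirfd, path, flags, *mount_attr, size) returns a
	// detached mount fd with the requested attributes applied.
	attr := unix.MountAttr{Attr_set: unix.MOUNT_ATTR_RDONLY}
	path, err := unix.BytePtrFromString("/mnt")
	if err != nil {
		panic(err)
	}
	fd, _, errno := unix.Syscall6(
		unix.SYS_OPEN_TREE_ATTR,
		uintptr(unix.AT_FDCWD),
		uintptr(unsafe.Pointer(path)),
		uintptr(unix.OPEN_TREE_CLONE),
		uintptr(unsafe.Pointer(&attr)),
		unsafe.Sizeof(attr),
	)
	if errno != 0 {
		fmt.Println("open_tree_attr:", errno)
		return
	}
	defer unix.Close(int(fd))
	fmt.Println("detached read-only mount fd:", fd)
}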
NFT_CT_EVENTMASK = 0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2594,8 +2605,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x20000 - SOF_TIMESTAMPING_MASK = 0x3ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3802,7 +3813,16 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2d + ETHTOOL_MSG_PLCA_GET_CFG = 0x27 + ETHTOOL_MSG_PLCA_SET_CFG = 0x28 + ETHTOOL_MSG_PLCA_GET_STATUS = 0x29 + ETHTOOL_MSG_MM_GET = 0x2a + ETHTOOL_MSG_MM_SET = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 0x2c + ETHTOOL_MSG_PHY_GET = 0x2d + ETHTOOL_MSG_TSCONFIG_GET = 0x2e + ETHTOOL_MSG_TSCONFIG_SET = 0x2f + ETHTOOL_MSG_USER_MAX = 0x2f ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3842,7 +3862,17 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2e + ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 0x27 + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 0x28 + ETHTOOL_MSG_PLCA_NTF = 0x29 + ETHTOOL_MSG_MM_GET_REPLY = 0x2a + ETHTOOL_MSG_MM_NTF = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 0x2c + ETHTOOL_MSG_PHY_GET_REPLY = 0x2d + ETHTOOL_MSG_PHY_NTF = 0x2e + ETHTOOL_MSG_TSCONFIG_GET_REPLY = 0x2f + ETHTOOL_MSG_TSCONFIG_SET_REPLY = 0x30 + ETHTOOL_MSG_KERNEL_MAX = 0x30 ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3949,7 +3979,12 @@ const ( ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb ETHTOOL_A_RINGS_CQE_SIZE = 0xc ETHTOOL_A_RINGS_TX_PUSH = 0xd - ETHTOOL_A_RINGS_MAX = 0x10 + ETHTOOL_A_RINGS_RX_PUSH = 0xe + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 0xf + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 0x10 + ETHTOOL_A_RINGS_HDS_THRESH = 0x11 + ETHTOOL_A_RINGS_HDS_THRESH_MAX = 0x12 + ETHTOOL_A_RINGS_MAX = 0x12 ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -4015,7 +4050,9 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x6 + ETHTOOL_A_TSINFO_STATS = 0x6 + ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 + ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4101,6 +4138,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type EthtoolDrvinfo struct { @@ -4613,6 +4663,7 @@ const ( NL80211_ATTR_AKM_SUITES = 0x4c NL80211_ATTR_AP_ISOLATE = 0x60 NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135 + NL80211_ATTR_ASSOC_SPP_AMSDU = 0x14a NL80211_ATTR_AUTH_DATA = 0x9c NL80211_ATTR_AUTH_TYPE = 0x35 NL80211_ATTR_BANDS = 0xef @@ -4623,6 +4674,7 @@ const ( NL80211_ATTR_BSS_BASIC_RATES = 0x24 NL80211_ATTR_BSS = 0x2f NL80211_ATTR_BSS_CTS_PROT = 0x1c + NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA = 0x147 NL80211_ATTR_BSS_HT_OPMODE = 0x6d NL80211_ATTR_BSSID = 0xf5 NL80211_ATTR_BSS_SELECT = 0xe3 @@ -4682,6 +4734,7 @@ const ( NL80211_ATTR_DTIM_PERIOD = 0xd NL80211_ATTR_DURATION = 0x57 NL80211_ATTR_EHT_CAPABILITY = 0x136 + NL80211_ATTR_EMA_RNR_ELEMS = 0x145 
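One consolidation worth calling out: the TCP_V4_FLOW/TCP_V6_FLOW and UDP_V4_FLOW/UDP_V6_FLOW constants deleted from zerrors_linux.go earlier in this diff are not gone; they reappear here in ztypes_linux.go as a single grouped block alongside the other ethtool flow types (ESP_*, IP_USER_FLOW, IPV6_USER_FLOW, IPV6_FLOW, ETHER_FLOW), with unchanged values, so existing callers keep compiling. A minimal sketch of how such flow-type values might be labeled when decoding ethtool RX flow-hash results; the helper name is ours, not part of the package:

package ethflow

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// flowName labels an ethtool flow type such as the ones carried in
// struct ethtool_rxnfc queries; unknown types fall through to hex.
func flowName(t uint32) string {
	switch t {
	case unix.TCP_V4_FLOW:
		return "tcp4"
	case unix.UDP_V4_FLOW:
		return "udp4"
	case unix.TCP_V6_FLOW:
		return "tcp6"
	case unix.UDP_V6_FLOW:
		return "udp6"
	case unix.ETHER_FLOW:
		return "ether"
	default:
		return fmt.Sprintf("flow(%#x)", t)
	}
}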
NL80211_ATTR_EML_CAPABILITY = 0x13d NL80211_ATTR_EXT_CAPA = 0xa9 NL80211_ATTR_EXT_CAPA_MASK = 0xaa @@ -4717,6 +4770,7 @@ const ( NL80211_ATTR_HIDDEN_SSID = 0x7e NL80211_ATTR_HT_CAPABILITY = 0x1f NL80211_ATTR_HT_CAPABILITY_MASK = 0x94 + NL80211_ATTR_HW_TIMESTAMP_ENABLED = 0x144 NL80211_ATTR_IE_ASSOC_RESP = 0x80 NL80211_ATTR_IE = 0x2a NL80211_ATTR_IE_PROBE_RESP = 0x7f @@ -4747,9 +4801,10 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14d + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce + NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 NL80211_ATTR_MAX_MATCH_SETS = 0x85 NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 @@ -4774,9 +4829,12 @@ const ( NL80211_ATTR_MGMT_SUBTYPE = 0x29 NL80211_ATTR_MLD_ADDR = 0x13a NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e + NL80211_ATTR_MLO_LINK_DISABLED = 0x146 NL80211_ATTR_MLO_LINK_ID = 0x139 NL80211_ATTR_MLO_LINKS = 0x138 NL80211_ATTR_MLO_SUPPORT = 0x13b + NL80211_ATTR_MLO_TTLM_DLINK = 0x148 + NL80211_ATTR_MLO_TTLM_ULINK = 0x149 NL80211_ATTR_MNTR_FLAGS = 0x17 NL80211_ATTR_MPATH_INFO = 0x1b NL80211_ATTR_MPATH_NEXT_HOP = 0x1a @@ -4809,12 +4867,14 @@ const ( NL80211_ATTR_PORT_AUTHORIZED = 0x103 NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5 NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6 + NL80211_ATTR_POWER_RULE_PSD = 0x8 NL80211_ATTR_PREV_BSSID = 0x4f NL80211_ATTR_PRIVACY = 0x46 NL80211_ATTR_PROBE_RESP = 0x91 NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90 NL80211_ATTR_PROTOCOL_FEATURES = 0xad NL80211_ATTR_PS_STATE = 0x5d + NL80211_ATTR_PUNCT_BITMAP = 0x142 NL80211_ATTR_QOS_MAP = 0xc7 NL80211_ATTR_RADAR_BACKGROUND = 0x134 NL80211_ATTR_RADAR_EVENT = 0xa8 @@ -4943,7 +5003,9 @@ const ( NL80211_ATTR_WIPHY_FREQ = 0x26 NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9 NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122 + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS = 0x14c NL80211_ATTR_WIPHY_NAME = 0x2 + NL80211_ATTR_WIPHY_RADIOS = 0x14b NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40 @@ -4978,6 +5040,8 @@ const ( NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 NL80211_BAND_ATTR_MAX = 0xd NL80211_BAND_ATTR_RATES = 0x2 + NL80211_BAND_ATTR_S1G_CAPA = 0xd + NL80211_BAND_ATTR_S1G_MCS_NSS_SET = 0xc NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8 @@ -5001,6 +5065,10 @@ const ( NL80211_BSS_BEACON_INTERVAL = 0x4 NL80211_BSS_BEACON_TSF = 0xd NL80211_BSS_BSSID = 0x1 + NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 0x2 + NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 0x1 + NL80211_BSS_CANNOT_USE_REASONS = 0x18 + NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 0x2 NL80211_BSS_CAPABILITY = 0x5 NL80211_BSS_CHAIN_SIGNAL = 0x13 NL80211_BSS_CHAN_WIDTH_10 = 0x1 @@ -5032,6 +5100,9 @@ const ( NL80211_BSS_STATUS = 0x9 NL80211_BSS_STATUS_IBSS_JOINED = 0x2 NL80211_BSS_TSF = 0x3 + NL80211_BSS_USE_FOR = 0x17 + NL80211_BSS_USE_FOR_MLD_LINK = 0x2 + NL80211_BSS_USE_FOR_NORMAL = 0x1 NL80211_CHAN_HT20 = 0x1 NL80211_CHAN_HT40MINUS = 0x2 NL80211_CHAN_HT40PLUS = 0x3 @@ -5117,7 +5188,8 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9b + NL80211_CMD_LINKS_REMOVED = 0x9a + NL80211_CMD_MAX = 0x9d NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5161,6 +5233,7 @@ const ( NL80211_CMD_SET_COALESCE = 0x65 NL80211_CMD_SET_CQM = 0x3f NL80211_CMD_SET_FILS_AAD = 0x92 + 
NL80211_CMD_SET_HW_TIMESTAMP = 0x99 NL80211_CMD_SET_INTERFACE = 0x6 NL80211_CMD_SET_KEY = 0xa NL80211_CMD_SET_MAC_ACL = 0x5d @@ -5180,6 +5253,7 @@ const ( NL80211_CMD_SET_SAR_SPECS = 0x8c NL80211_CMD_SET_STATION = 0x12 NL80211_CMD_SET_TID_CONFIG = 0x89 + NL80211_CMD_SET_TID_TO_LINK_MAPPING = 0x9b NL80211_CMD_SET_TX_BITRATE_MASK = 0x39 NL80211_CMD_SET_WDS_PEER = 0x42 NL80211_CMD_SET_WIPHY = 0x2 @@ -5247,6 +5321,7 @@ const ( NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21 NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22 NL80211_EXT_FEATURE_AQL = 0x28 + NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA = 0x40 NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29 NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36 @@ -5262,6 +5337,7 @@ const ( NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c + NL80211_EXT_FEATURE_DFS_CONCURRENT = 0x43 NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 @@ -5281,9 +5357,12 @@ const ( NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_OWE_OFFLOAD_AP = 0x42 + NL80211_EXT_FEATURE_OWE_OFFLOAD = 0x41 NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_PUNCT = 0x3e NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c NL80211_EXT_FEATURE_RRM = 0x1 NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 @@ -5295,8 +5374,10 @@ const ( NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23 NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc NL80211_EXT_FEATURE_SECURE_LTF = 0x37 + NL80211_EXT_FEATURE_SECURE_NAN = 0x3f NL80211_EXT_FEATURE_SECURE_RTT = 0x38 NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5 + NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT = 0x44 NL80211_EXT_FEATURE_STA_TX_PWR = 0x25 NL80211_EXT_FEATURE_TXQS = 0x1c NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35 @@ -5343,7 +5424,10 @@ const ( NL80211_FREQUENCY_ATTR_2MHZ = 0x16 NL80211_FREQUENCY_ATTR_4MHZ = 0x17 NL80211_FREQUENCY_ATTR_8MHZ = 0x18 + NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP = 0x21 + NL80211_FREQUENCY_ATTR_CAN_MONITOR = 0x20 NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd + NL80211_FREQUENCY_ATTR_DFS_CONCURRENT = 0x1d NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7 NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8 NL80211_FREQUENCY_ATTR_DISABLED = 0x2 @@ -5351,12 +5435,14 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x21 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a + NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b NL80211_FREQUENCY_ATTR_NO_HE = 0x13 @@ -5364,8 +5450,11 @@ const ( NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3 NL80211_FREQUENCY_ATTR_NO_IR = 0x3 + NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_OFFSET = 0x14 NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3 + NL80211_FREQUENCY_ATTR_PSD = 
0x1c NL80211_FREQUENCY_ATTR_RADAR = 0x5 NL80211_FREQUENCY_ATTR_WMM = 0x12 NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3 @@ -5430,6 +5519,7 @@ const ( NL80211_IFTYPE_STATION = 0x2 NL80211_IFTYPE_UNSPECIFIED = 0x0 NL80211_IFTYPE_WDS = 0x5 + NL80211_KCK_EXT_LEN_32 = 0x20 NL80211_KCK_EXT_LEN = 0x18 NL80211_KCK_LEN = 0x10 NL80211_KEK_EXT_LEN = 0x20 @@ -5458,9 +5548,10 @@ const ( NL80211_MAX_SUPP_HT_RATES = 0x4d NL80211_MAX_SUPP_RATES = 0x20 NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 @@ -5703,11 +5794,16 @@ const ( NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_16_MHZ_WIDTH = 0x1d + NL80211_RATE_INFO_1_MHZ_WIDTH = 0x19 + NL80211_RATE_INFO_2_MHZ_WIDTH = 0x1a NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12 NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 + NL80211_RATE_INFO_4_MHZ_WIDTH = 0x1b NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 + NL80211_RATE_INFO_8_MHZ_WIDTH = 0x1c NL80211_RATE_INFO_BITRATE32 = 0x5 NL80211_RATE_INFO_BITRATE = 0x1 NL80211_RATE_INFO_EHT_GI_0_8 = 0x0 @@ -5753,6 +5849,8 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 + NL80211_RATE_INFO_S1G_MCS = 0x17 + NL80211_RATE_INFO_S1G_NSS = 0x18 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 NL80211_RATE_INFO_VHT_NSS = 0x7 @@ -5770,14 +5868,19 @@ const ( NL80211_REKEY_DATA_KEK = 0x1 NL80211_REKEY_DATA_REPLAY_CTR = 0x3 NL80211_REPLAY_CTR_LEN = 0x8 + NL80211_RRF_ALLOW_6GHZ_VLP_AP = 0x1000000 NL80211_RRF_AUTO_BW = 0x800 NL80211_RRF_DFS = 0x10 + NL80211_RRF_DFS_CONCURRENT = 0x200000 NL80211_RRF_GO_CONCURRENT = 0x1000 NL80211_RRF_IR_CONCURRENT = 0x1000 NL80211_RRF_NO_160MHZ = 0x10000 NL80211_RRF_NO_320MHZ = 0x40000 + NL80211_RRF_NO_6GHZ_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_6GHZ_VLP_CLIENT = 0x400000 NL80211_RRF_NO_80MHZ = 0x8000 NL80211_RRF_NO_CCK = 0x2 + NL80211_RRF_NO_EHT = 0x80000 NL80211_RRF_NO_HE = 0x20000 NL80211_RRF_NO_HT40 = 0x6000 NL80211_RRF_NO_HT40MINUS = 0x2000 @@ -5788,7 +5891,10 @@ const ( NL80211_RRF_NO_IR = 0x80 NL80211_RRF_NO_OFDM = 0x1 NL80211_RRF_NO_OUTDOOR = 0x8 + NL80211_RRF_NO_UHB_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_UHB_VLP_CLIENT = 0x400000 NL80211_RRF_PASSIVE_SCAN = 0x80 + NL80211_RRF_PSD = 0x100000 NL80211_RRF_PTMP_ONLY = 0x40 NL80211_RRF_PTP_ONLY = 0x20 NL80211_RXMGMT_FLAG_ANSWERED = 0x1 @@ -5849,6 +5955,7 @@ const ( NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 + NL80211_STA_FLAG_SPP_AMSDU = 0x8 NL80211_STA_FLAG_TDLS_PEER = 0x6 NL80211_STA_FLAG_WME = 0x3 NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23 @@ -6007,6 +6114,13 @@ const ( NL80211_VHT_CAPABILITY_LEN = 0xc NL80211_VHT_NSS_MAX = 0x8 NL80211_WIPHY_NAME_MAXLEN = 0x40 + NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE = 0x2 + NL80211_WIPHY_RADIO_ATTR_INDEX = 0x1 + NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION = 0x3 + NL80211_WIPHY_RADIO_ATTR_MAX = 0x4 + NL80211_WIPHY_RADIO_FREQ_ATTR_END = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_START = 0x1 NL80211_WMMR_AIFSN = 0x3 NL80211_WMMR_CW_MAX = 0x2 NL80211_WMMR_CW_MIN = 0x1 @@ -6038,6 +6152,7 @@ const ( 
NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4 NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9 NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe + NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC = 0x14 NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index fd402da43f..485f2d3a1b 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -282,7 +282,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -338,6 +338,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index eb7a5e1864..ecbd1ad8bc 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -351,6 +351,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index d78ac108b6..02f0463a44 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -91,7 +91,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -273,7 +273,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -329,6 +329,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index cd06d47f1f..6f4d400d24 100644 --- 
a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -330,6 +330,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 2f28fe26c1..cd532cfa55 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -331,6 +331,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 71d6cac2f1..4133620851 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -278,7 +278,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -334,6 +334,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 8596d45356..eaa37eb718 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -333,6 +333,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go 
b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index cd60ea1866..98ae6a1e4a 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -333,6 +333,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index b0ae420c48..cae1961594 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -278,7 +278,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -334,6 +334,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 8359728759..6ce3b4e028 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -90,7 +90,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -285,7 +285,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -341,6 +341,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 69eb6a5c68..c7429c6a14 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -340,6 +340,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + 
Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 5f583cb62b..4bf4baf4ca 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -340,6 +340,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index ad05b51a60..e9709d70af 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -358,6 +358,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index cf3ce90037..fb44268ca7 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -353,6 +353,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 590b56739c..9c38265c74 100644 --- a/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/openshift/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -335,6 +335,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min 
uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/vendor/golang.org/x/term/term_windows.go b/openshift/vendor/golang.org/x/term/term_windows.go index df6bf948e1..0ddd81c02a 100644 --- a/openshift/vendor/golang.org/x/term/term_windows.go +++ b/openshift/vendor/golang.org/x/term/term_windows.go @@ -20,12 +20,14 @@ func isTerminal(fd int) bool { return err == nil } +// This is intended to be used on a console input handle. +// See https://learn.microsoft.com/en-us/windows/console/setconsolemode func makeRaw(fd int) (*State, error) { var st uint32 if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { return nil, err } - raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT) raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err diff --git a/openshift/vendor/golang.org/x/term/terminal.go b/openshift/vendor/golang.org/x/term/terminal.go index 13e9a64ad1..bddb2e2aeb 100644 --- a/openshift/vendor/golang.org/x/term/terminal.go +++ b/openshift/vendor/golang.org/x/term/terminal.go @@ -146,6 +146,7 @@ const ( keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' + keyLF = '\n' keyEscape = 27 keyBackspace = 127 keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota @@ -497,7 +498,7 @@ func (t *Terminal) historyAdd(entry string) { // handleKey processes the given key and, optionally, returns a line of text // that the user has entered. func (t *Terminal) handleKey(key rune) (line string, ok bool) { - if t.pasteActive && key != keyEnter { + if t.pasteActive && key != keyEnter && key != keyLF { t.addKeyToLine(key) return } @@ -567,7 +568,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { t.setLine(runes, len(runes)) } } - case keyEnter: + case keyEnter, keyLF: t.moveCursorToPos(len(t.line)) t.queue([]rune("\r\n")) line = string(t.line) @@ -812,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) { if !t.pasteActive { lineIsPasted = false } + // If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line. + if key == keyEnter && len(rest) > 0 && rest[0] == keyLF { + rest = rest[1:] + } line, lineOk = t.handleKey(key) } if len(rest) > 0 { diff --git a/openshift/vendor/golang.org/x/tools/internal/astutil/edge/edge.go b/openshift/vendor/golang.org/x/tools/go/ast/edge/edge.go similarity index 100% rename from openshift/vendor/golang.org/x/tools/internal/astutil/edge/edge.go rename to openshift/vendor/golang.org/x/tools/go/ast/edge/edge.go diff --git a/openshift/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/openshift/vendor/golang.org/x/tools/go/ast/inspector/cursor.go new file mode 100644 index 0000000000..31c8d2f240 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/ast/inspector/cursor.go @@ -0,0 +1,502 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +import ( + "fmt" + "go/ast" + "go/token" + "iter" + "reflect" + + "golang.org/x/tools/go/ast/edge" +) + +// A Cursor represents an [ast.Node]. It is immutable. +// +// Two Cursors compare equal if they represent the same node. 
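The x/term changes above are behavioral, not just cosmetic: makeRaw on Windows stops clearing ENABLE_PROCESSED_OUTPUT (per the new comment it operates on a console input handle, so the output flag was never its business), and the new keyLF handling makes Terminal accept a bare LF, or the LF half of a CRLF pair, as line termination without, as the added comment puts it, "returning an extra empty line". A small sketch of the latter, using an in-memory reader as a stand-in for a real tty:

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/term"
)

// rw glues a reader and a writer into the io.ReadWriter that
// term.NewTerminal expects.
type rw struct {
	io.Reader
	io.Writer
}

func main() {
	// CRLF-terminated input: the LF after each CR is now consumed as
	// part of the line ending instead of starting an empty line.
	in := bytes.NewBufferString("hello\r\nworld\r\n")
	t := term.NewTerminal(rw{in, io.Discard}, "> ")
	for {
		line, err := t.ReadLine()
		if err != nil {
			break // io.EOF once the buffer is drained
		}
		fmt.Printf("%q\n", line) // "hello", then "world"
	}
}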
+// +// Call [Inspector.Root] to obtain a valid cursor for the virtual root +// node of the traversal. +// +// Use the following methods to navigate efficiently around the tree: +// - for ancestors, use [Cursor.Parent] and [Cursor.Enclosing]; +// - for children, use [Cursor.Child], [Cursor.Children], +// [Cursor.FirstChild], and [Cursor.LastChild]; +// - for siblings, use [Cursor.PrevSibling] and [Cursor.NextSibling]; +// - for descendants, use [Cursor.FindByPos], [Cursor.FindNode], +// [Cursor.Inspect], and [Cursor.Preorder]. +// +// Use the [Cursor.ChildAt] and [Cursor.ParentEdge] methods for +// information about the edges in a tree: which field (and slice +// element) of the parent node holds the child. +type Cursor struct { + in *Inspector + index int32 // index of push node; -1 for virtual root node +} + +// Root returns a cursor for the virtual root node, +// whose children are the files provided to [New]. +// +// Its [Cursor.Node] and [Cursor.Stack] methods return nil. +func (in *Inspector) Root() Cursor { + return Cursor{in, -1} +} + +// At returns the cursor at the specified index in the traversal, +// which must have been obtained from [Cursor.Index] on a Cursor +// belonging to the same Inspector (see [Cursor.Inspector]). +func (in *Inspector) At(index int32) Cursor { + if index < 0 { + panic("negative index") + } + if int(index) >= len(in.events) { + panic("index out of range for this inspector") + } + if in.events[index].index < index { + panic("invalid index") // (a push, not a pop) + } + return Cursor{in, index} +} + +// Inspector returns the cursor's Inspector. +func (c Cursor) Inspector() *Inspector { return c.in } + +// Index returns the index of this cursor position within the package. +// +// Clients should not assume anything about the numeric Index value +// except that it increases monotonically throughout the traversal. +// It is provided for use with [At]. +// +// Index must not be called on the Root node. +func (c Cursor) Index() int32 { + if c.index < 0 { + panic("Index called on Root node") + } + return c.index +} + +// Node returns the node at the current cursor position, +// or nil for the cursor returned by [Inspector.Root]. +func (c Cursor) Node() ast.Node { + if c.index < 0 { + return nil + } + return c.in.events[c.index].node +} + +// String returns information about the cursor's node, if any. +func (c Cursor) String() string { + if c.in == nil { + return "(invalid)" + } + if c.index < 0 { + return "(root)" + } + return reflect.TypeOf(c.Node()).String() +} + +// indices return the [start, end) half-open interval of event indices. +func (c Cursor) indices() (int32, int32) { + if c.index < 0 { + return 0, int32(len(c.in.events)) // root: all events + } else { + return c.index, c.in.events[c.index].index + 1 // just one subtree + } +} + +// Preorder returns an iterator over the nodes of the subtree +// represented by c in depth-first order. Each node in the sequence is +// represented by a Cursor that allows access to the Node, but may +// also be used to start a new traversal, or to obtain the stack of +// nodes enclosing the cursor. +// +// The traversal sequence is determined by [ast.Inspect]. The types +// argument, if non-empty, enables type-based filtering of events. The +// function f if is called only for nodes whose type matches an +// element of the types slice. 
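To make the new API concrete, here is a minimal, self-contained use of Root and Preorder; it needs a Go toolchain with range-over-func iterators (Go 1.23+), since Preorder returns an iter.Seq:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "x.go", "package p; func g() { println(1) }", 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f})
	// Type-filtered preorder walk from the virtual root: only
	// *ast.CallExpr nodes are yielded.
	for c := range in.Root().Preorder((*ast.CallExpr)(nil)) {
		call := c.Node().(*ast.CallExpr)
		fmt.Println("call at", fset.Position(call.Pos()))
	}
}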
+// +// If you need control over descent into subtrees, +// or need both pre- and post-order notifications, use [Cursor.Inspect] +func (c Cursor) Preorder(types ...ast.Node) iter.Seq[Cursor] { + mask := maskOf(types) + + return func(yield func(Cursor) bool) { + events := c.in.events + + for i, limit := c.indices(); i < limit; { + ev := events[i] + if ev.index > i { // push? + if ev.typ&mask != 0 && !yield(Cursor{c.in, i}) { + break + } + pop := ev.index + if events[pop].typ&mask == 0 { + // Subtree does not contain types: skip. + i = pop + 1 + continue + } + } + i++ + } + } +} + +// Inspect visits the nodes of the subtree represented by c in +// depth-first order. It calls f(n) for each node n before it +// visits n's children. If f returns true, Inspect invokes f +// recursively for each of the non-nil children of the node. +// +// Each node is represented by a Cursor that allows access to the +// Node, but may also be used to start a new traversal, or to obtain +// the stack of nodes enclosing the cursor. +// +// The complete traversal sequence is determined by [ast.Inspect]. +// The types argument, if non-empty, enables type-based filtering of +// events. The function f if is called only for nodes whose type +// matches an element of the types slice. +func (c Cursor) Inspect(types []ast.Node, f func(c Cursor) (descend bool)) { + mask := maskOf(types) + events := c.in.events + for i, limit := c.indices(); i < limit; { + ev := events[i] + if ev.index > i { + // push + pop := ev.index + if ev.typ&mask != 0 && !f(Cursor{c.in, i}) || + events[pop].typ&mask == 0 { + // The user opted not to descend, or the + // subtree does not contain types: + // skip past the pop. + i = pop + 1 + continue + } + } + i++ + } +} + +// Enclosing returns an iterator over the nodes enclosing the current +// current node, starting with the Cursor itself. +// +// Enclosing must not be called on the Root node (whose [Cursor.Node] returns nil). +// +// The types argument, if non-empty, enables type-based filtering of +// events: the sequence includes only enclosing nodes whose type +// matches an element of the types slice. +func (c Cursor) Enclosing(types ...ast.Node) iter.Seq[Cursor] { + if c.index < 0 { + panic("Cursor.Enclosing called on Root node") + } + + mask := maskOf(types) + + return func(yield func(Cursor) bool) { + events := c.in.events + for i := c.index; i >= 0; i = events[i].parent { + if events[i].typ&mask != 0 && !yield(Cursor{c.in, i}) { + break + } + } + } +} + +// Parent returns the parent of the current node. +// +// Parent must not be called on the Root node (whose [Cursor.Node] returns nil). +func (c Cursor) Parent() Cursor { + if c.index < 0 { + panic("Cursor.Parent called on Root node") + } + + return Cursor{c.in, c.in.events[c.index].parent} +} + +// ParentEdge returns the identity of the field in the parent node +// that holds this cursor's node, and if it is a list, the index within it. +// +// For example, f(x, y) is a CallExpr whose three children are Idents. +// f has edge kind [edge.CallExpr_Fun] and index -1. +// x and y have kind [edge.CallExpr_Args] and indices 0 and 1, respectively. +// +// If called on a child of the Root node, it returns ([edge.Invalid], -1). +// +// ParentEdge must not be called on the Root node (whose [Cursor.Node] returns nil). 
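Continuing the previous sketch (same fset, f, and in), the ancestor-side navigation described here looks like this; cur is bound to each call expression in turn:

for cur := range in.Root().Preorder((*ast.CallExpr)(nil)) {
	// Enclosing walks outward, innermost first; filter to the
	// nearest enclosing function declarations.
	for anc := range cur.Enclosing((*ast.FuncDecl)(nil)) {
		fmt.Println("inside func", anc.Node().(*ast.FuncDecl).Name.Name)
	}
	// Identity of the parent field holding this node; for the
	// argument of a call this would be (edge.CallExpr_Args, i).
	k, idx := cur.ParentEdge()
	fmt.Println("parent edge:", k, "index:", idx)
}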
+func (c Cursor) ParentEdge() (edge.Kind, int) { + if c.index < 0 { + panic("Cursor.ParentEdge called on Root node") + } + events := c.in.events + pop := events[c.index].index + return unpackEdgeKindAndIndex(events[pop].parent) +} + +// ChildAt returns the cursor for the child of the +// current node identified by its edge and index. +// The index must be -1 if the edge.Kind is not a slice. +// The indicated child node must exist. +// +// ChildAt must not be called on the Root node (whose [Cursor.Node] returns nil). +// +// Invariant: c.Parent().ChildAt(c.ParentEdge()) == c. +func (c Cursor) ChildAt(k edge.Kind, idx int) Cursor { + target := packEdgeKindAndIndex(k, idx) + + // Unfortunately there's no shortcut to looping. + events := c.in.events + i := c.index + 1 + for { + pop := events[i].index + if pop < i { + break + } + if events[pop].parent == target { + return Cursor{c.in, i} + } + i = pop + 1 + } + panic(fmt.Sprintf("ChildAt(%v, %d): no such child of %v", k, idx, c)) +} + +// Child returns the cursor for n, which must be a direct child of c's Node. +// +// Child must not be called on the Root node (whose [Cursor.Node] returns nil). +func (c Cursor) Child(n ast.Node) Cursor { + if c.index < 0 { + panic("Cursor.Child called on Root node") + } + + if false { + // reference implementation + for child := range c.Children() { + if child.Node() == n { + return child + } + } + + } else { + // optimized implementation + events := c.in.events + for i := c.index + 1; events[i].index > i; i = events[i].index + 1 { + if events[i].node == n { + return Cursor{c.in, i} + } + } + } + panic(fmt.Sprintf("Child(%T): not a child of %v", n, c)) +} + +// NextSibling returns the cursor for the next sibling node in the same list +// (for example, of files, decls, specs, statements, fields, or expressions) as +// the current node. It returns (zero, false) if the node is the last node in +// the list, or is not part of a list. +// +// NextSibling must not be called on the Root node. +// +// See note at [Cursor.Children]. +func (c Cursor) NextSibling() (Cursor, bool) { + if c.index < 0 { + panic("Cursor.NextSibling called on Root node") + } + + events := c.in.events + i := events[c.index].index + 1 // after corresponding pop + if i < int32(len(events)) { + if events[i].index > i { // push? + return Cursor{c.in, i}, true + } + } + return Cursor{}, false +} + +// PrevSibling returns the cursor for the previous sibling node in the +// same list (for example, of files, decls, specs, statements, fields, +// or expressions) as the current node. It returns zero if the node is +// the first node in the list, or is not part of a list. +// +// It must not be called on the Root node. +// +// See note at [Cursor.Children]. +func (c Cursor) PrevSibling() (Cursor, bool) { + if c.index < 0 { + panic("Cursor.PrevSibling called on Root node") + } + + events := c.in.events + i := c.index - 1 + if i >= 0 { + if j := events[i].index; j < i { // pop? + return Cursor{c.in, j}, true + } + } + return Cursor{}, false +} + +// FirstChild returns the first direct child of the current node, +// or zero if it has no children. +func (c Cursor) FirstChild() (Cursor, bool) { + events := c.in.events + i := c.index + 1 // i=0 if c is root + if i < int32(len(events)) && events[i].index > i { // push? + return Cursor{c.in, i}, true + } + return Cursor{}, false +} + +// LastChild returns the last direct child of the current node, +// or zero if it has no children. 
+func (c Cursor) LastChild() (Cursor, bool) { + events := c.in.events + if c.index < 0 { // root? + if len(events) > 0 { + // return push of final event (a pop) + return Cursor{c.in, events[len(events)-1].index}, true + } + } else { + j := events[c.index].index - 1 // before corresponding pop + // Inv: j == c.index if c has no children + // or j is last child's pop. + if j > c.index { // c has children + return Cursor{c.in, events[j].index}, true + } + } + return Cursor{}, false +} + +// Children returns an iterator over the direct children of the +// current node, if any. +// +// When using Children, NextChild, and PrevChild, bear in mind that a +// Node's children may come from different fields, some of which may +// be lists of nodes without a distinguished intervening container +// such as [ast.BlockStmt]. +// +// For example, [ast.CaseClause] has a field List of expressions and a +// field Body of statements, so the children of a CaseClause are a mix +// of expressions and statements. Other nodes that have "uncontained" +// list fields include: +// +// - [ast.ValueSpec] (Names, Values) +// - [ast.CompositeLit] (Type, Elts) +// - [ast.IndexListExpr] (X, Indices) +// - [ast.CallExpr] (Fun, Args) +// - [ast.AssignStmt] (Lhs, Rhs) +// +// So, do not assume that the previous sibling of an ast.Stmt is also +// an ast.Stmt, or if it is, that they are executed sequentially, +// unless you have established that, say, its parent is a BlockStmt +// or its [Cursor.ParentEdge] is [edge.BlockStmt_List]. +// For example, given "for S1; ; S2 {}", the predecessor of S2 is S1, +// even though they are not executed in sequence. +func (c Cursor) Children() iter.Seq[Cursor] { + return func(yield func(Cursor) bool) { + c, ok := c.FirstChild() + for ok && yield(c) { + c, ok = c.NextSibling() + } + } +} + +// Contains reports whether c contains or is equal to c2. +// +// Both Cursors must belong to the same [Inspector]; +// neither may be its Root node. +func (c Cursor) Contains(c2 Cursor) bool { + if c.in != c2.in { + panic("different inspectors") + } + events := c.in.events + return c.index <= c2.index && events[c2.index].index <= events[c.index].index +} + +// FindNode returns the cursor for node n if it belongs to the subtree +// rooted at c. It returns zero if n is not found. +func (c Cursor) FindNode(n ast.Node) (Cursor, bool) { + + // FindNode is equivalent to this code, + // but more convenient and 15-20% faster: + if false { + for candidate := range c.Preorder(n) { + if candidate.Node() == n { + return candidate, true + } + } + return Cursor{}, false + } + + // TODO(adonovan): opt: should we assume Node.Pos is accurate + // and combine type-based filtering with position filtering + // like FindByPos? + + mask := maskOf([]ast.Node{n}) + events := c.in.events + + for i, limit := c.indices(); i < limit; i++ { + ev := events[i] + if ev.index > i { // push? + if ev.typ&mask != 0 && ev.node == n { + return Cursor{c.in, i}, true + } + pop := ev.index + if events[pop].typ&mask == 0 { + // Subtree does not contain type of n: skip. + i = pop + } + } + } + return Cursor{}, false +} + +// FindByPos returns the cursor for the innermost node n in the tree +// rooted at c such that n.Pos() <= start && end <= n.End(). +// (For an *ast.File, it uses the bounds n.FileStart-n.FileEnd.) +// +// It returns zero if none is found. +// Precondition: start <= end. +// +// See also [astutil.PathEnclosingInterval], which +// tolerates adjoining whitespace. 
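+//
+// A sketch of typical use (start and end are assumed to be valid
+// positions within the subtree rooted at c):
+//
+//	if inner, ok := c.FindByPos(start, end); ok {
+//		_ = inner.Node() // innermost node covering [start, end]
+//	}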
+func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { + if end < start { + panic("end < start") + } + events := c.in.events + + // This algorithm could be implemented using c.Inspect, + // but it is about 2.5x slower. + + best := int32(-1) // push index of latest (=innermost) node containing range + for i, limit := c.indices(); i < limit; i++ { + ev := events[i] + if ev.index > i { // push? + n := ev.node + var nodeEnd token.Pos + if file, ok := n.(*ast.File); ok { + nodeEnd = file.FileEnd + // Note: files may be out of Pos order. + if file.FileStart > start { + i = ev.index // disjoint, after; skip to next file + continue + } + } else { + nodeEnd = n.End() + if n.Pos() > start { + break // disjoint, after; stop + } + } + // Inv: node.{Pos,FileStart} <= start + if end <= nodeEnd { + // node fully contains target range + best = i + } else if nodeEnd < start { + i = ev.index // disjoint, before; skip forward + } + } + } + if best >= 0 { + return Cursor{c.in, best}, true + } + return Cursor{}, false +} diff --git a/openshift/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/openshift/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index 674490a65b..a703cdfcf9 100644 --- a/openshift/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/openshift/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -13,10 +13,19 @@ // This representation is sometimes called a "balanced parenthesis tree." // // Experiments suggest the inspector's traversals are about 2.5x faster -// than ast.Inspect, but it may take around 5 traversals for this +// than [ast.Inspect], but it may take around 5 traversals for this // benefit to amortize the inspector's construction cost. // If efficiency is the primary concern, do not use Inspector for // one-off traversals. +// +// The [Cursor] type provides a more flexible API for efficient +// navigation of syntax trees in all four "cardinal directions". For +// example, traversals may be nested, so you can find each node of +// type A and then search within it for nodes of type B. Or you can +// traverse from a node to its immediate neighbors: its parent, its +// previous and next sibling, or its first and last child. We +// recommend using methods of Cursor in preference to Inspector where +// possible. package inspector // There are four orthogonal features in a traversal: @@ -37,9 +46,8 @@ package inspector import ( "go/ast" - _ "unsafe" - "golang.org/x/tools/internal/astutil/edge" + "golang.org/x/tools/go/ast/edge" ) // An Inspector provides methods for inspecting @@ -48,18 +56,12 @@ type Inspector struct { events []event } -//go:linkname events golang.org/x/tools/go/ast/inspector.events -func events(in *Inspector) []event { return in.events } - -//go:linkname packEdgeKindAndIndex golang.org/x/tools/go/ast/inspector.packEdgeKindAndIndex func packEdgeKindAndIndex(ek edge.Kind, index int) int32 { return int32(uint32(index+1)<<7 | uint32(ek)) } // unpackEdgeKindAndIndex unpacks the edge kind and edge index (within // an []ast.Node slice) from the parent field of a pop event. -// -//go:linkname unpackEdgeKindAndIndex golang.org/x/tools/go/ast/inspector.unpackEdgeKindAndIndex func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) { // The "parent" field of a pop node holds the // edge Kind in the lower 7 bits and the index+1 @@ -83,15 +85,21 @@ type event struct { // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). // Type can be recovered from the sole bit in typ. +// [Tried this, wasn't faster. 
--adonovan]
 
 // Preorder visits all the nodes of the files supplied to New in
 // depth-first order. It calls f(n) for each node n before it visits
 // n's children.
 //
-// The complete traversal sequence is determined by ast.Inspect.
+// The complete traversal sequence is determined by [ast.Inspect].
 // The types argument, if non-empty, enables type-based filtering of
 // events. The function f is called only for nodes whose type
 // matches an element of the types slice.
+//
+// The [Cursor.Preorder] method provides a richer alternative interface.
+// Example:
+//
+//	for c := range in.Root().Preorder(types) { ... }
 func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
 	// Because it avoids postorder calls to f, and the pruning
 	// check, Preorder is almost twice as fast as Nodes. The two
@@ -131,10 +139,18 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
 // of the non-nil children of the node, followed by a call of
 // f(n, false).
 //
-// The complete traversal sequence is determined by ast.Inspect.
+// The complete traversal sequence is determined by [ast.Inspect].
 // The types argument, if non-empty, enables type-based filtering of
 // events. The function f is called only for nodes whose type
 // matches an element of the types slice.
+//
+// The [Cursor.Inspect] method provides a richer alternative interface.
+// Example:
+//
+//	in.Root().Inspect(types, func(c Cursor) bool {
+//		...
+//		return true
+//	})
 func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
 	mask := maskOf(types)
 	for i := int32(0); i < int32(len(in.events)); {
@@ -168,6 +184,15 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc
 // supplies each call to f an additional argument, the current
 // traversal stack. The stack's first element is the outermost node,
 // an *ast.File; its last is the innermost, n.
+//
+// The [Cursor.Inspect] method provides a richer alternative interface.
+// Example:
+//
+//	in.Root().Inspect(types, func(c Cursor) bool {
+//		stack := slices.Collect(c.Enclosing())
+//		...
+// return true +// }) func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { mask := maskOf(types) var stack []ast.Node @@ -233,7 +258,7 @@ type visitor struct { type item struct { index int32 // index of current node's push event parentIndex int32 // index of parent node's push event - typAccum uint64 // accumulated type bits of current node's descendents + typAccum uint64 // accumulated type bits of current node's descendants edgeKindAndIndex int32 // edge.Kind and index, bit packed } diff --git a/openshift/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/openshift/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index e936c67c98..9852331a3d 100644 --- a/openshift/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/openshift/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,8 +12,6 @@ package inspector import ( "go/ast" "math" - - _ "unsafe" ) const ( @@ -217,7 +215,6 @@ func typeOf(n ast.Node) uint64 { return 0 } -//go:linkname maskOf golang.org/x/tools/go/ast/inspector.maskOf func maskOf(nodes []ast.Node) uint64 { if len(nodes) == 0 { return math.MaxUint64 // match all node types diff --git a/openshift/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/openshift/vendor/golang.org/x/tools/go/ast/inspector/walk.go index 5a42174a0a..5f1c93c8a7 100644 --- a/openshift/vendor/golang.org/x/tools/go/ast/inspector/walk.go +++ b/openshift/vendor/golang.org/x/tools/go/ast/inspector/walk.go @@ -13,7 +13,7 @@ import ( "fmt" "go/ast" - "golang.org/x/tools/internal/astutil/edge" + "golang.org/x/tools/go/ast/edge" ) func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) { diff --git a/openshift/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/openshift/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 0000000000..7b90bc9235 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,236 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for reading and writing +// export data, which is a serialized description of the API of a Go +// package including the names, kinds, types, and locations of all +// exported declarations. +// +// The standard Go compiler (cmd/compile) writes an export data file +// for each package it compiles, which it later reads when compiling +// packages that import the earlier one. The compiler must thus +// contain logic to both write and read export data. +// (See the "Export" section in the cmd/compile/README file.) +// +// The [Read] function in this package can read files produced by the +// compiler, producing [go/types] data structures. As a matter of +// policy, Read supports export data files produced by only the last +// two Go releases plus tip; see https://go.dev/issue/68898. The +// export data files produced by the compiler contain additional +// details related to generics, inlining, and other optimizations that +// cannot be decoded by the [Read] function. +// +// In files written by the compiler, the export data is not at the +// start of the file. Before calling Read, use [NewReader] to locate +// the desired portion of the file. +// +// The [Write] function in this package encodes the exported API of a +// Go package ([types.Package]) as a file. 
Such files can be later +// decoded by Read, but cannot be consumed by the compiler. +// +// # Future changes +// +// Although Read supports the formats written by both Write and the +// compiler, the two are quite different, and there is an open +// proposal (https://go.dev/issue/69491) to separate these APIs. +// +// Under that proposal, this package would ultimately provide only the +// Read operation for compiler export data, which must be defined in +// this module (golang.org/x/tools), not in the standard library, to +// avoid version skew for developer tools that need to read compiler +// export data both before and after a Go release, such as from Go +// 1.23 to Go 1.24. Because this package lives in the tools module, +// clients can update their version of the module some time before the +// Go 1.24 release and rebuild and redeploy their tools, which will +// then be able to consume both Go 1.23 and Go 1.24 export data files, +// so they will work before and after the Go update. (See discussion +// at https://go.dev/issue/15651.) +// +// The operations to import and export [go/types] data structures +// would be defined in the go/types package as Import and Export. +// [Write] would (eventually) delegate to Export, +// and [Read], when it detects a file produced by Export, +// would delegate to Import. +// +// # Deprecations +// +// The [NewImporter] and [Find] functions are deprecated and should +// not be used in new code. The [WriteBundle] and [ReadBundle] +// functions are experimental, and there is an open proposal to +// deprecate them (https://go.dev/issue/69573). +package gcexportdata + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "go/token" + "go/types" + "io" + "os/exec" + + "golang.org/x/tools/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the go command. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func Find(importPath, srcDir string) (filename, path string) { + cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) + cmd.Dir = srcDir + out, err := cmd.Output() + if err != nil { + return "", "" + } + var data struct { + ImportPath string + Export string + } + json.Unmarshal(out, &data) + return data.Export, data.ImportPath +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. +func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + size, err := gcimporter.FindExportData(buf) + if err != nil { + return nil, err + } + + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil +} + +// readAll works the same way as io.ReadAll, but avoids allocations and copies +// by preallocating a byte slice of the necessary size if the size is known up +// front. This is always possible when the input is an archive. 
In that case, +// NewReader will return the known size using an io.LimitedReader. +func readAll(r io.Reader) ([]byte, error) { + if lr, ok := r.(*io.LimitedReader); ok { + data := make([]byte, lr.N) + _, err := io.ReadFull(lr, data) + return data, err + } + return io.ReadAll(r) +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// +// Read is capable of reading export data produced by [Write] at the +// same source code version, or by the last two Go releases (plus tip) +// of the standard Go compiler. Reading files from older compilers may +// produce an error. +// +// The package path (effectively its linker symbol prefix) is +// specified by path, since unlike the package name, this information +// may not be recorded in the export data. +// +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. +func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 { + switch data[0] { + case 'v', 'c', 'd': + // binary, produced by cmd/compile till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) + + case 'i': + // indexed, produced by cmd/compile till go1.19, + // and also by [Write]. + // + // If proposal #69491 is accepted, go/types + // serialization will be implemented by + // types.Export, to which Write would eventually + // delegate (explicitly dropping any pretence at + // inter-version Write-Read compatibility). + // This [Read] function would delegate to types.Import + // when it detects that the file was produced by Export. + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + + case 'u': + // unified, produced by cmd/compile since go1.20 + _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) + return pkg, err + + default: + l := min(len(data), 10) + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) + } + } + return nil, fmt.Errorf("empty export data for %s", path) +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + if _, err := io.WriteString(out, "i"); err != nil { + return err + } + return gcimporter.IExportData(out, fset, pkg) +} + +// ReadBundle reads an export bundle from in, decodes it, and returns type +// information for the packages. +// File position information is added to fset. 
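+//
+// A minimal sketch (in is assumed to hold a bundle written by [WriteBundle]):
+//
+//	pkgs, err := ReadBundle(in, fset, make(map[string]*types.Package))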
+// +// ReadBundle may inspect and add to the imports map to ensure that references +// within the export bundle to other packages are consistent. +// +// On return, the state of the reader is undefined. +// +// Experimental: This API is experimental and may change in the future. +func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export bundle: %v", err) + } + return gcimporter.IImportBundle(fset, imports, data) +} + +// WriteBundle writes encoded type information for the specified packages to out. +// The FileSet provides file position information for named objects. +// +// Experimental: This API is experimental and may change in the future. +func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + return gcimporter.IExportBundle(out, fset, pkgs) +} diff --git a/openshift/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/openshift/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 0000000000..37a7247e26 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. 
+ return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/openshift/vendor/golang.org/x/tools/go/packages/doc.go b/openshift/vendor/golang.org/x/tools/go/packages/doc.go new file mode 100644 index 0000000000..366aab6b2c --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/packages/doc.go @@ -0,0 +1,253 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package packages loads Go packages for inspection and analysis. + +The [Load] function takes as input a list of patterns and returns a +list of [Package] values describing individual packages matched by those +patterns. +A [Config] specifies configuration options, the most important of which is +the [LoadMode], which controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool. +The default build tool is the go command. +Its supported patterns are described at +https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. +Other build systems may be supported by providing a "driver"; +see [The driver protocol]. + +All patterns with the prefix "query=", where query is a +non-empty string of letters from [a-z], are reserved and may be +interpreted as query operators. + +Two query operators are currently supported: "file" and "pattern". + +The query "file=path/to/file.go" matches the package or packages enclosing +the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" +might return the packages "fmt" and "fmt [fmt.test]". + +The query "pattern=string" causes "string" to be passed directly to +the underlying build tool. In most cases this is unnecessary, +but an application can use Load("pattern=" + x) as an escaping mechanism +to ensure that x is not interpreted as a query operator if it contains '='. + +All other query operators are reserved for future use and currently +cause Load to report an error. + +The Package struct provides basic information about the package, including + + - ID, a unique identifier for the package in the returned set; + - GoFiles, the names of the package's Go source files; + - Imports, a map from source import strings to the Packages they name; + - Types, the type information for the package's exported symbols; + - Syntax, the parsed syntax trees for the package's source code; and + - TypesInfo, the result of a complete type-check of the package syntax trees. + +(See the documentation for type Package for the complete list of fields +and more detailed descriptions.) + +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. 
Their dependencies can be found by walking the import +graph using the Imports fields. + +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in [LoadFiles] mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to Load, so that it can interpret them +according to the conventions of the underlying build system. + +See the Example function for typical usage. +See also [golang.org/x/tools/go/packages/internal/linecount] +for an example application. + +# The driver protocol + +Load may be used to load Go packages even in Go projects that use +alternative build systems, by installing an appropriate "driver" +program for the build system and specifying its location in the +GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. + +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +those patterns identify. Drivers must also support the special "file=" +and "pattern=" patterns described above. + +The patterns are provided as positional command-line arguments. A +JSON-encoded [DriverRequest] message providing additional information +is written to the driver's standard input. The driver must write a +JSON-encoded [DriverResponse] message to its standard output. (This +message differs from the JSON schema produced by 'go list'.) + +The value of the PWD environment variable seen by the driver process +is the preferred name of its working directory. (The working directory +may have other aliases due to symbolic links; see the comment on the +Dir field of [exec.Cmd] for related information.) +When the driver process emits in its response the name of a file +that is a descendant of this directory, it must use an absolute path +that has the value of PWD as a prefix, to ensure that the returned +filenames satisfy the original query. +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. 
The Metadata query function
+executes an external query tool appropriate to the current workspace.
+
+Loading packages always returns the complete import graph "all the way down",
+even if all you want is information about a single package, because the query
+mechanisms of all the build systems we currently support ({go,vgo} list, and
+blaze/bazel aspect-based query) cannot provide detailed information
+about one package without visiting all its dependencies too, so there is
+no additional asymptotic cost to providing transitive information.
+(This property might not be true of a hypothetical 5th build system.)
+
+In calls to TypeCheck, all initial packages, and any package that
+transitively depends on one of them, must be loaded from source.
+Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
+source; D may be loaded from export data, and E may not be loaded at all
+(though it's possible that D's export data mentions it, so a
+types.Package may be created for it and exposed.)
+
+The old loader had a feature to suppress type-checking of function
+bodies on a per-package basis, primarily intended to reduce the work of
+obtaining type information for imported packages. Now that imports are
+satisfied by export data, the optimization no longer seems necessary.
+
+Despite some early attempts, the old loader did not exploit export data,
+instead always using the equivalent of WholeProgram mode. This was due
+to the complexity of mixing source and export data packages (now
+resolved by the upward traversal mentioned above), and because export data
+files were nearly always missing or stale. Now that 'go build' supports
+caching, all the underlying build systems can guarantee to produce
+export data in a reasonable (amortized) time.
+
+Test "main" packages synthesized by the build system are now reported as
+first-class packages, avoiding the need for clients (such as go/ssa) to
+reinvent this generation logic.
+
+One way in which go/packages is simpler than the old loader is in its
+treatment of in-package tests. In-package tests are packages that
+consist of all the files of the library under test, plus the test files.
+The old loader constructed in-package tests by a two-phase process of
+mutation called "augmentation": first it would construct and type check
+all the ordinary library packages and type-check the packages that
+depend on them; then it would add more (test) files to the package and
+type-check again. This two-phase approach had four major problems:
+1) in processing the tests, the loader modified the library package,
+   leaving no way for a client application to see both the test
+   package and the library package; one would mutate into the other.
+2) because test files can declare additional methods on types defined in
+   the library portion of the package, the dispatch of method calls in
+   the library portion was affected by the presence of the test files.
+   This should have been a clue that the packages were logically
+   different.
+3) this model of "augmentation" assumed at most one in-package test
+   per library package, which is true of projects using 'go build',
+   but not other build systems.
+4) because of the two-phase nature of test processing, all packages that
+   import the library package had to be processed before augmentation,
+   forcing a "one-shot" API and preventing the client from calling Load
+   several times in sequence as is now possible in WholeProgram mode.
+   (TypeCheck mode has a similar one-shot restriction for a different reason.)
+
+Early drafts of this package supported "multi-shot" operation.
+Although it allowed clients to make a sequence of calls (or concurrent
+calls) to Load, building up the graph of Packages incrementally,
+it was of marginal value: it complicated the API
+(since it allowed some options to vary across calls but not others),
+it complicated the implementation,
+it cannot be made to work in Types mode, as explained above,
+and it was less efficient than making one combined call (when this is possible).
+Among the clients we have inspected, none made multiple calls to Load
+but could not be easily and satisfactorily modified to make only a single call.
+However, application changes may be required.
+For example, the ssadump command loads the user-specified packages
+and in addition the runtime package. It is tempting to simply append
+"runtime" to the user-provided list, but that does not work if the user
+specified an ad-hoc package such as [a.go b.go].
+Instead, ssadump no longer requests the runtime package,
+but seeks it among the dependencies of the user-specified packages,
+and emits an error if it is not found.
+
+Questions & Tasks
+
+- Add GOARCH/GOOS?
+  They are not portable concepts, but could be made portable.
+  Our goal has been to allow users to express themselves using the conventions
+  of the underlying build system: if the build system honors GOARCH
+  during a build and during a metadata query, then so should
+  applications built atop that query mechanism.
+  Conversely, if the target architecture of the build is determined by
+  command-line flags, the application can pass the relevant
+  flags through to the build system using a command such as:
+  myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
+  However, this approach is low-level, unwieldy, and non-portable.
+  GOOS and GOARCH seem important enough to warrant a dedicated option.
+
+- How should we handle partial failures such as a mixture of good and
+  malformed patterns, existing and non-existent packages, successful and
+  failed builds, import failures, import cycles, and so on, in a call to
+  Load?
+
+- Support bazel, blaze, and go1.10 list, not just go1.11 list.
+
+- Handle (and test) various partial success cases, e.g.
+  a mixture of good packages and:
+  invalid patterns
+  nonexistent packages
+  empty packages
+  packages with malformed package or import declarations
+  unreadable files
+  import cycles
+  other parse errors
+  type errors
+  Make sure we record errors at the correct place in the graph.
+
+- Missing packages among initial arguments are not reported.
+  Return bogus packages for them, like golist does.
+
+- "undeclared name" errors (for example) are reported out of source file
+  order. I suspect this is due to the breadth-first resolution now used
+  by go/types. Is that a bug? Discuss with gri.
+
+*/
diff --git a/openshift/vendor/golang.org/x/tools/go/packages/external.go b/openshift/vendor/golang.org/x/tools/go/packages/external.go
new file mode 100644
index 0000000000..f37bc65100
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/go/packages/external.go
@@ -0,0 +1,153 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+// This file defines the protocol that enables an external "driver"
+// tool to supply package metadata in place of 'go list'.
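+//
+// For orientation, a hypothetical driver's main loop might look like the
+// sketch below ("handle" is illustrative, not part of any real API):
+//
+//	var req DriverRequest
+//	json.NewDecoder(os.Stdin).Decode(&req) // patterns arrive in os.Args[1:]
+//	resp := handle(&req, os.Args[1:])      // resp is a *DriverResponse
+//	json.NewEncoder(os.Stdout).Encode(resp)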
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os"
+	"os/exec"
+	"slices"
+	"strings"
+)
+
+// DriverRequest defines the schema of a request for package metadata
+// from an external driver program. The JSON-encoded DriverRequest
+// message is provided to the driver program's standard input. The
+// query patterns are provided as command-line arguments.
+//
+// See the package documentation for an overview.
+type DriverRequest struct {
+	Mode LoadMode `json:"mode"`
+
+	// Env specifies the environment the underlying build system should be run in.
+	Env []string `json:"env"`
+
+	// BuildFlags are flags that should be passed to the underlying build system.
+	BuildFlags []string `json:"build_flags"`
+
+	// Tests specifies whether the patterns should also return test packages.
+	Tests bool `json:"tests"`
+
+	// Overlay maps file paths (relative to the driver's working directory)
+	// to the contents of overlay files (see Config.Overlay).
+	Overlay map[string][]byte `json:"overlay"`
+}
+
+// DriverResponse defines the schema of a response from an external
+// driver program, providing the results of a query for package
+// metadata. The driver program must write a JSON-encoded
+// DriverResponse message to its standard output.
+//
+// See the package documentation for an overview.
+type DriverResponse struct {
+	// NotHandled is returned if the request can't be handled by the current
+	// driver. If an external driver returns a response with NotHandled, the
+	// rest of the DriverResponse is ignored, and go/packages will fall back
+	// to the next driver. If go/packages is extended in the future to support
+	// lists of multiple drivers, go/packages will fall back to the next driver.
+	NotHandled bool
+
+	// Compiler and Arch are the arguments passed to types.SizesFor
+	// to get a types.Sizes to use when type checking.
+	Compiler string
+	Arch     string
+
+	// Roots is the set of package IDs that make up the root packages.
+	// We have to encode this separately because when we encode a single package
+	// we cannot know if it is one of the roots as that requires knowledge of the
+	// graph it is part of.
+	Roots []string `json:",omitempty"`
+
+	// Packages is the full set of packages in the graph.
+	// The packages are not connected into a graph.
+	// The Imports, if populated, will be stubs that only have their ID set.
+	// Imports will be connected and then type and syntax information added in a
+	// later pass (see refine).
+	Packages []*Package
+
+	// GoVersion is the minor version number used by the driver
+	// (e.g. the go command on the PATH) when selecting .go files.
+	// Zero means unknown.
+	GoVersion int
+}
+
+// driver is the type for functions that query the build system for the
+// packages named by the patterns.
+type driver func(cfg *Config, patterns []string) (*DriverResponse, error)
+
+// findExternalDriver returns a driver function backed by an external tool
+// that supplies the build system package structure, or nil if no such tool
+// is found. If GOPACKAGESDRIVER is set in the environment, findExternalDriver
+// uses its value as the tool; otherwise it searches for a binary named
+// gopackagesdriver on the PATH.
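+//
+// (A GOPACKAGESDRIVER value of "off" disables the external driver
+// mechanism entirely, as the check at the top of the function shows.)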
+func findExternalDriver(cfg *Config) driver { + const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range cfg.Env { + if val, ok := strings.CutPrefix(env, toolPrefix); ok { + tool = val + } + } + if tool != "" && tool == "off" { + return nil + } + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + return nil + } + } + return func(cfg *Config, patterns []string) (*DriverResponse, error) { + req, err := json.Marshal(DriverRequest{ + Mode: cfg.Mode, + Env: cfg.Env, + BuildFlags: cfg.BuildFlags, + Tests: cfg.Tests, + Overlay: cfg.Overlay, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, tool, patterns...) + cmd.Dir = cfg.Dir + // The cwd gets resolved to the real path. On Darwin, where + // /tmp is a symlink, this breaks anything that expects the + // working directory to keep the original path, including the + // go command when dealing with modules. + // + // os.Getwd stdlib has a special feature where if the + // cwd and the PWD are the same node then it trusts + // the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go + // command. + // + // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go) + cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir) + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = stderr + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) + } + + var response DriverResponse + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return &response, nil + } +} diff --git a/openshift/vendor/golang.org/x/tools/go/packages/golist.go b/openshift/vendor/golang.org/x/tools/go/packages/golist.go new file mode 100644 index 0000000000..89f89dd2dc --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/packages/golist.go @@ -0,0 +1,1092 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" +) + +// debug controls verbose logging. +var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) + +// A goTooOldError reports that the go command +// found by exec.LookPath is too old to use the new go list behavior. +type goTooOldError struct { + error +} + +// responseDeduper wraps a DriverResponse, deduplicating its contents. +type responseDeduper struct { + seenRoots map[string]bool + seenPackages map[string]*Package + dr *DriverResponse +} + +func newDeduper() *responseDeduper { + return &responseDeduper{ + dr: &DriverResponse{}, + seenRoots: map[string]bool{}, + seenPackages: map[string]*Package{}, + } +} + +// addAll fills in r with a DriverResponse. 
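+// Roots and packages that have already been seen are skipped, so addAll
+// may be called once per driver response when merging several of them.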
+func (r *responseDeduper) addAll(dr *DriverResponse) { + for _, pkg := range dr.Packages { + r.addPackage(pkg) + } + for _, root := range dr.Roots { + r.addRoot(root) + } + r.dr.GoVersion = dr.GoVersion +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +type golistState struct { + cfg *Config + ctx context.Context + + runner *gocommand.Runner + + // overlay is the JSON file that encodes the Config.Overlay + // mapping, used by 'go list -overlay=...'. + overlay string + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + goVersionOnce sync.Once + goVersionError error + goVersion int // The X in Go 1.X. + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool +} + +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. +func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError +} + +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() + if err != nil { + panic(fmt.Sprintf("mustGetEnv: %v", err)) + } + return env +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. +// +// overlay is the JSON file that encodes the cfg.Overlay +// mapping, used by 'go list -overlay=...' +func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) { + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, + overlay: overlay, + runner: runner, + } + + // Fill in response.Sizes asynchronously if necessary. + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { + errCh := make(chan error) + go func() { + compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner) + response.dr.Compiler = compiler + response.dr.Arch = arch + errCh <- err + }() + defer func() { + if sizesErr := <-errCh; sizesErr != nil { + err = sizesErr + } + }() + } + + // Determine files requested in contains patterns + var containFiles []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. 
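+	// (For example, "file=/abs/path/to/main.go" selects the package
+	// containing that file, and "pattern=./..." is passed through to the
+	// underlying build tool verbatim; the paths here are illustrative.)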
+extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := state.createDriverResponse(restPatterns...) + if err != nil { + return nil, err + } + response.addAll(dr) + } + + if len(containFiles) != 0 { + if err := state.runContainsQueries(response, containFiles); err != nil { + return nil, err + } + } + + // (We may yet return an error due to defer.) + return response.dr, nil +} + +// abs returns an absolute representation of path, based on cfg.Dir. +func (cfg *Config) abs(path string) (string, error) { + if filepath.IsAbs(path) { + return path, nil + } + // In case cfg.Dir is relative, pass it to filepath.Abs. + return filepath.Abs(filepath.Join(cfg.Dir, path)) +} + +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. + fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := state.cfg.abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or no packages are returned, + // or the package is returned with errors, try to load the file as an + // ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. + if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { + var queryErr error + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { + return err // return the original error + } + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. 
+ response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. +func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) { + response, err := state.createDriverResponse(query) + if err != nil { + return nil, err + } + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? + if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ + ID: "command-line-arguments", + PkgPath: query, + GoFiles: []string{query}, + CompiledGoFiles: []string{query}, + Imports: make(map[string]*Package), + }) + response.Roots = append(response.Roots, "command-line-arguments") + } + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. + if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } + } + } + } + } + return response, nil +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. +type jsonPackage struct { + ImportPath string + Dir string + Name string + Target string + Export string + GoFiles []string + CompiledGoFiles []string + IgnoredGoFiles []string + IgnoredOtherFiles []string + EmbedPatterns []string + EmbedFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + Module *Module + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *packagesinternal.PackageError + DepsErrors []*packagesinternal.PackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. 
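+//
+// All words are expanded by a single "go list -json" invocation, so each
+// word may be any pattern that go list itself accepts.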
+func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...) + if err != nil { + return nil, err + } + + seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) + // Decode the JSON and convert it to Package form. + response := &DriverResponse{ + GoVersion: goVersion, + } + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try to convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. This will allow the package to be + // properly "reclaimed" when overlays are processed. + if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } + if ok { + p.ImportPath = pkgPath + } + } + + if old, found := seen[p.ImportPath]; found { + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. 
+ + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue + } + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. + if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 1 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1] + if importingPkg == old.ImportPath { + // Using an older version of Go which put this package itself on top of import + // stack, instead of the importer. Look for importer in second from top + // position. + if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind) + } + importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2] + } + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. + } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + Dir: p.Dir, + Target: p.Target, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + EmbedFiles: absJoin(p.Dir, p.EmbedFiles), + EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), + IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), + ForTest: p.ForTest, + depsErrors: p.DepsErrors, + Module: p.Module, + } + + if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { + if len(p.CompiledGoFiles) > len(p.GoFiles) { + // We need the cgo definitions, which are in the first + // CompiledGoFile after the non-cgo ones. This is a hack but there + // isn't currently a better way to find it. We also need the pure + // Go files and unprocessed cgo files, all of which are already + // in pkg.GoFiles. + cgoTypes := p.CompiledGoFiles[len(p.GoFiles)] + pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...) + } else { + // golang/go#38990: go list silently fails to do cgo processing + pkg.CompiledGoFiles = nil + pkg.Errors = append(pkg.Errors, Error{ + Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.", + Kind: ListError, + }) + } + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Remove files from CompiledGoFiles that are non-go files + // (or are not files that look like they are from the cache). 
+		if len(pkg.CompiledGoFiles) > 0 {
+			out := pkg.CompiledGoFiles[:0]
+			for _, f := range pkg.CompiledGoFiles {
+				if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file
+					continue
+				}
+				out = append(out, f)
+			}
+			pkg.CompiledGoFiles = out
+		}
+
+		// Extract the PkgPath from the package's ID.
+		if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
+			pkg.PkgPath = pkg.ID[:i]
+		} else {
+			pkg.PkgPath = pkg.ID
+		}
+
+		if pkg.PkgPath == "unsafe" {
+			pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929)
+		} else if len(pkg.CompiledGoFiles) == 0 {
+			// Workaround for pre-go1.11 versions of go list.
+			// TODO(matloob): they should be handled by the fallback.
+			// Can we delete this?
+			pkg.CompiledGoFiles = pkg.GoFiles
+		}
+
+		// Assume go list emits only absolute paths for Dir.
+		if p.Dir != "" && !filepath.IsAbs(p.Dir) {
+			log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
+		}
+
+		if p.Export != "" && !filepath.IsAbs(p.Export) {
+			pkg.ExportFile = filepath.Join(p.Dir, p.Export)
+		} else {
+			pkg.ExportFile = p.Export
+		}
+
+		// imports
+		//
+		// Imports contains the IDs of all imported packages.
+		// ImportMap records (path, ID) only where they differ.
+		ids := make(map[string]bool)
+		for _, id := range p.Imports {
+			ids[id] = true
+		}
+		pkg.Imports = make(map[string]*Package)
+		for path, id := range p.ImportMap {
+			pkg.Imports[path] = &Package{ID: id} // non-identity import
+			delete(ids, id)
+		}
+		for id := range ids {
+			if id == "C" {
+				continue
+			}
+
+			pkg.Imports[id] = &Package{ID: id} // identity import
+		}
+		if !p.DepOnly {
+			response.Roots = append(response.Roots, pkg.ID)
+		}
+
+		// Temporary work-around for golang/go#39986. Parse filenames out of
+		// error messages. This happens if there are unrecoverable syntax
+		// errors in the source, so we can't match on a specific error message.
+		//
+		// TODO(rfindley): remove this heuristic, in favor of considering
+		// InvalidGoFiles from the list driver.
+		if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) {
+			addFilenameFromPos := func(pos string) bool {
+				split := strings.Split(pos, ":")
+				if len(split) < 1 {
+					return false
+				}
+				filename := strings.TrimSpace(split[0])
+				if filename == "" {
+					return false
+				}
+				if !filepath.IsAbs(filename) {
+					filename = filepath.Join(state.cfg.Dir, filename)
+				}
+				info, _ := os.Stat(filename)
+				if info == nil {
+					return false
+				}
+				pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
+				pkg.GoFiles = append(pkg.GoFiles, filename)
+				return true
+			}
+			found := addFilenameFromPos(err.Pos)
+			// In some cases, go list only reports the error position in the
+			// error text, not in the error's Pos field. One such case is when the
+			// file's package name is a keyword (see golang.org/issue/39763).
+			if !found {
+				addFilenameFromPos(err.Err)
+			}
+		}
+
+		if p.Error != nil {
+			msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
+			// Address golang.org/issue/35964 by appending import stack to error message.
+			if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
+				msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
+			}
+			pkg.Errors = append(pkg.Errors, Error{
+				Pos:  p.Error.Pos,
+				Msg:  msg,
+				Kind: ListError,
+			})
+		}
+
+		pkgs[pkg.ID] = pkg
+	}
+
+	for id, errs := range additionalErrors {
+		if p, ok := pkgs[id]; ok {
+			p.Errors = append(p.Errors, errs...)
+		}
+	}
+	for _, pkg := range pkgs {
+		response.Packages = append(response.Packages, pkg)
+	}
+	sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
+
+	return response, nil
+}
+
+func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
+	if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 {
+		return false
+	}
+
+	goV, err := state.getGoVersion()
+	if err != nil {
+		return false
+	}
+
+	// On Go 1.14 and earlier, only add filenames from errors if the import stack is empty.
+	// The import stack behaves differently for these versions than newer Go versions.
+	if goV < 15 {
+		return len(p.Error.ImportStack) == 0
+	}
+
+	// On Go 1.15 and later, only parse filenames out of error if there's no import stack,
+	// or the current package is at the top of the import stack. This is not guaranteed
+	// to work perfectly, but should avoid some cases where files in errors don't belong to this
+	// package.
+	return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath
+}
+
+// getGoVersion returns the effective minor version of the go command.
+func (state *golistState) getGoVersion() (int, error) {
+	state.goVersionOnce.Do(func() {
+		state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner)
+	})
+	return state.goVersion, state.goVersionError
+}
+
+// getPkgPath finds the package path of a directory if it's relative to a root
+// directory.
+func (state *golistState) getPkgPath(dir string) (string, bool, error) {
+	if !filepath.IsAbs(dir) {
+		panic("non-absolute dir passed to getPkgPath")
+	}
+	roots, err := state.determineRootDirs()
+	if err != nil {
+		return "", false, err
+	}
+
+	for rdir, rpath := range roots {
+		// Make sure that the directory is in the module,
+		// to avoid creating a path relative to another module.
+		if !strings.HasPrefix(dir, rdir) {
+			continue
+		}
+		// TODO(matloob): This doesn't properly handle symlinks.
+		r, err := filepath.Rel(rdir, dir)
+		if err != nil {
+			continue
+		}
+		if rpath != "" {
+			// We choose only one root even though the directory can belong to multiple modules
+			// or GOPATH entries. This is okay because we only need to work with absolute dirs when a
+			// file is missing from disk, for instance when gopls calls go/packages in an overlay.
+			// Once the file is saved, gopls or the next invocation of the tool will get the correct
+			// result straight from go list.
+			// TODO(matloob): Implement module tiebreaking?
+			return path.Join(rpath, filepath.ToSlash(r)), true, nil
+		}
+		return filepath.ToSlash(r), true, nil
+	}
+	return "", false, nil
+}
+
+// absJoin absolutizes and flattens the lists of files.
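For illustration, the core of the getPkgPath mapping can be pulled out as a standalone function; the roots map literal in main is hypothetical (in the code above it comes from determineRootDirs):

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
	"strings"
)

// pkgPathFor mirrors the getPkgPath loop above: keys of roots are
// absolute root directories, values are their import-path prefixes
// ("" for a GOPATH src root).
func pkgPathFor(dir string, roots map[string]string) (string, bool) {
	for rdir, rpath := range roots {
		if !strings.HasPrefix(dir, rdir) {
			continue
		}
		r, err := filepath.Rel(rdir, dir)
		if err != nil {
			continue
		}
		if rpath != "" {
			return path.Join(rpath, filepath.ToSlash(r)), true
		}
		return filepath.ToSlash(r), true
	}
	return "", false
}

func main() {
	roots := map[string]string{"/home/user/proj": "example.com/proj"} // made-up module root
	fmt.Println(pkgPathFor("/home/user/proj/internal/util", roots))
	// Output: example.com/proj/internal/util true
}
```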
+func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func jsonFlag(cfg *Config, goVersion int) string { + if goVersion < 19 { + return "-json" + } + var fields []string + added := make(map[string]bool) + addFields := func(fs ...string) { + for _, f := range fs { + if !added[f] { + added[f] = true + fields = append(fields, f) + } + } + } + addFields("Name", "ImportPath", "Error") // These fields are always needed + if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { + addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", + "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", + "SwigFiles", "SwigCXXFiles", "SysoFiles") + if cfg.Tests { + addFields("TestGoFiles", "XTestGoFiles") + } + } + if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { + // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, + // even when -compiled isn't passed in. + // TODO(#52435): Should we make the test ask for -compiled, or automatically + // request CompiledGoFiles in certain circumstances? + addFields("Dir", "CompiledGoFiles") + } + if cfg.Mode&NeedCompiledGoFiles != 0 { + addFields("Dir", "CompiledGoFiles", "Export") + } + if cfg.Mode&NeedImports != 0 { + // When imports are requested, DepOnly is used to distinguish between packages + // explicitly requested and transitive imports of those packages. + addFields("DepOnly", "Imports", "ImportMap") + if cfg.Tests { + addFields("TestImports", "XTestImports") + } + } + if cfg.Mode&NeedDeps != 0 { + addFields("DepOnly") + } + if usesExportData(cfg) { + // Request Dir in the unlikely case Export is not absolute. + addFields("Dir", "Export") + } + if cfg.Mode&NeedForTest != 0 { + addFields("ForTest") + } + if cfg.Mode&needInternalDepsErrors != 0 { + addFields("DepsErrors") + } + if cfg.Mode&NeedModule != 0 { + addFields("Module") + } + if cfg.Mode&NeedEmbedFiles != 0 { + addFields("EmbedFiles") + } + if cfg.Mode&NeedEmbedPatterns != 0 { + addFields("EmbedPatterns") + } + if cfg.Mode&NeedTarget != 0 { + addFields("Target") + } + return "-json=" + strings.Join(fields, ",") +} + +func golistargs(cfg *Config, words []string, goVersion int) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo + fullargs := []string{ + "-e", jsonFlag(cfg, goVersion), + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), + // go list doesn't let you pass -test and -find together, + // probably because you'd just get the TestMain. + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), + } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// cfgInvocation returns an Invocation that reflects cfg's settings. 
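Taken together, jsonFlag and golistargs yield a command line of the following rough shape on go1.19+. This sketch only assembles the string; the field list is abbreviated and the `./...` pattern is just an example:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Only the fields the LoadMode actually needs are requested,
	// which keeps the go list output (and its cost) small.
	fields := []string{"Name", "ImportPath", "Error", "Dir", "GoFiles"}
	args := []string{
		"go", "list", "-e",
		"-json=" + strings.Join(fields, ","),
		"-compiled=false", "-test=false", "-find=true",
		"--", "./...",
	}
	fmt.Println(strings.Join(args, " "))
	// go list -e -json=Name,ImportPath,Error,Dir,GoFiles -compiled=false -test=false -find=true -- ./...
}
```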
+func (state *golistState) cfgInvocation() gocommand.Invocation { + cfg := state.cfg + return gocommand.Invocation{ + BuildFlags: cfg.BuildFlags, + CleanEnv: cfg.Env != nil, + Env: cfg.Env, + Logf: cfg.Logf, + WorkingDir: cfg.Dir, + Overlay: state.overlay, + } +} + +// invokeGo returns the stdout of a go command invocation. +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + + inv := state.cfgInvocation() + inv.Verb = verb + inv.Args = args + + stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv) + if err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, fmt.Errorf("couldn't run 'go': %w", err) + } + + // Old go version? + if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Related to #24854 + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { + return nil, friendlyErr + } + + // Return an error if 'go list' failed due to missing tools in + // $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606). + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) { + return nil, friendlyErr + } + + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field + // and should be suppressed by go list -e. + // + // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. + isPkgPathRune := func(r rune) bool { + // From https://golang.org/ref/spec#Import_declarations: + // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings + // using only characters belonging to Unicode's L, M, N, P, and S general categories + // (the Graphic characters without spaces) and may also exclude the + // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. + return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && + !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) + } + // golang/go#36770: Handle case where cmd/go prints module download messages before the error. + msg := stderr.String() + for strings.HasPrefix(msg, "go: downloading") { + msg = msg[strings.IndexRune(msg, '\n')+1:] + } + if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { + msg := msg[len("# "):] + if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { + return stdout, nil + } + // Treat pkg-config errors as a special case (golang.org/issue/36770). + if strings.HasPrefix(msg, "pkg-config") { + return stdout, nil + } + } + + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. 
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
+			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Similar to the previous error, but currently lacks a fix in Go.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") {
+			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath.
+		// If the package doesn't exist, put the absolute path of the directory into the error message,
+		// as Go 1.13 list does.
+		const noSuchDirectory = "no such directory"
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
+			errstr := stderr.String()
+			abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				abspath, strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
+		// Note that the error message we look for in this case is different from the one looked for above.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
+			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
+		// directory outside any module.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				// TODO(matloob): command-line-arguments isn't correct here.
+				"command-line-arguments", strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Another variation of the previous error.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				// TODO(matloob): command-line-arguments isn't correct here.
+				"command-line-arguments", strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit
+		// status if there's a dependency on a package that doesn't exist. But it should return
+		// a zero exit status and set an error on that package.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
+			// Don't clobber stdout if `go list` actually returned something.
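Each of these workarounds synthesizes a single-package JSON stanza from stderr, and %q escapes the message so it embeds safely into the JSON. A self-contained sketch with a made-up error string, round-tripped through encoding/json to show the stanza is well formed:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	stderrMsg := "named files must be .go files: foo.txt" // made-up example
	output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
		stderrMsg)

	var pkg struct {
		ImportPath string
		Incomplete bool
		Error      struct{ Pos, Err string }
	}
	if err := json.Unmarshal([]byte(output), &pkg); err != nil {
		panic(err)
	}
	fmt.Println(pkg.ImportPath, "->", pkg.Error.Err)
}
```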
+ if len(stdout.String()) > 0 { + return stdout, nil + } + // try to extract package name from string + stderrStr := stderr.String() + var importPath string + colon := strings.Index(stderrStr, ":") + if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { + importPath = stderrStr[len("go build "):colon] + } + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + importPath, strings.Trim(stderrStr, "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, friendlyErr + } + } + return stdout, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + k, v := split[0], split[1] + env[k] = v + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} + +// getSizesForArgs queries 'go list' for the appropriate +// Compiler and GOARCH arguments to pass to [types.SizesFor]. +func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return "", "", enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr + } else { + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. 
+ return "", "", rawErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return compiler, goarch, nil +} diff --git a/openshift/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/openshift/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 0000000000..d9d5a45cd4 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,83 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "encoding/json" + "path/filepath" + + "golang.org/x/tools/internal/gocommand" +) + +// determineRootDirs returns a mapping from absolute directories that could +// contain code to their corresponding import path prefixes. +func (state *golistState) determineRootDirs() (map[string]string, error) { + env, err := state.getEnv() + if err != nil { + return nil, err + } + if env["GOMOD"] != "" { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsModules() + }) + } else { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() + }) + } + return state.rootDirs, state.rootDirsError +} + +func (state *golistState) determineRootDirsModules() (map[string]string, error) { + // List all of the modules--the first will be the directory for the main + // module. Any replaced modules will also need to be treated as roots. + // Editing files in the module cache isn't a great idea, so we don't + // plan to ever support that. + out, err := state.invokeGo("list", "-m", "-json", "all") + if err != nil { + // 'go list all' will fail if we're outside of a module and + // GO111MODULE=on. Try falling back without 'all'. + var innerErr error + out, innerErr = state.invokeGo("list", "-m", "-json") + if innerErr != nil { + return nil, err + } + } + roots := map[string]string{} + modules := map[string]string{} + var i int + for dec := json.NewDecoder(out); dec.More(); { + mod := new(gocommand.ModuleJSON) + if err := dec.Decode(mod); err != nil { + return nil, err + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. + absDir, err := state.cfg.abs(mod.Dir) + if err != nil { + return nil, err + } + modules[absDir] = mod.Path + // The first result is the main module. + if i == 0 || mod.Replace != nil && mod.Replace.Path != "" { + roots[absDir] = mod.Path + } + } + i++ + } + return roots, nil +} + +func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { + m := map[string]string{} + for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { + absDir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + m[filepath.Join(absDir, "src")] = "" + } + return m, nil +} diff --git a/openshift/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/openshift/vendor/golang.org/x/tools/go/packages/loadmode_string.go new file mode 100644 index 0000000000..69eec9f44d --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -0,0 +1,56 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packages + +import ( + "fmt" + "strings" +) + +var modes = [...]struct { + mode LoadMode + name string +}{ + {NeedName, "NeedName"}, + {NeedFiles, "NeedFiles"}, + {NeedCompiledGoFiles, "NeedCompiledGoFiles"}, + {NeedImports, "NeedImports"}, + {NeedDeps, "NeedDeps"}, + {NeedExportFile, "NeedExportFile"}, + {NeedTypes, "NeedTypes"}, + {NeedSyntax, "NeedSyntax"}, + {NeedTypesInfo, "NeedTypesInfo"}, + {NeedTypesSizes, "NeedTypesSizes"}, + {NeedForTest, "NeedForTest"}, + {NeedModule, "NeedModule"}, + {NeedEmbedFiles, "NeedEmbedFiles"}, + {NeedEmbedPatterns, "NeedEmbedPatterns"}, + {NeedTarget, "NeedTarget"}, +} + +func (mode LoadMode) String() string { + if mode == 0 { + return "LoadMode(0)" + } + var out []string + // named bits + for _, item := range modes { + if (mode & item.mode) != 0 { + mode ^= item.mode + out = append(out, item.name) + } + } + // unnamed residue + if mode != 0 { + if out == nil { + return fmt.Sprintf("LoadMode(%#x)", int(mode)) + } + out = append(out, fmt.Sprintf("%#x", int(mode))) + } + if len(out) == 1 { + return out[0] + } + return "(" + strings.Join(out, "|") + ")" +} diff --git a/openshift/vendor/golang.org/x/tools/go/packages/packages.go b/openshift/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 0000000000..060ab08efb --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1559 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "log" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/sync/errgroup" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typesinternal" +) + +// A LoadMode controls the amount of detail to return when loading. +// The bits below can be combined to specify which fields should be +// filled in the result packages. +// +// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// +// ID and Errors (if present) will always be filled. +// [Load] may return more information than requested. +// +// The Mode flag is a union of several bits named NeedName, +// NeedFiles, and so on, each of which determines whether +// a given field of Package (Name, Files, etc) should be +// populated. +// +// For convenience, we provide named constants for the most +// common combinations of Need flags: +// +// [LoadFiles] lists of files in each package +// [LoadImports] ... plus imports +// [LoadTypes] ... plus type information +// [LoadSyntax] ... plus type-annotated syntax +// [LoadAllSyntax] ... for all dependencies +// +// Unfortunately there are a number of open bugs related to +// interactions among the LoadMode bits: +// - https://go.dev/issue/56633 +// - https://go.dev/issue/56677 +// - https://go.dev/issue/58726 +// - https://go.dev/issue/63517 +type LoadMode int + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. 
If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. + NeedDeps + + // NeedExportFile adds ExportFile. + NeedExportFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax and Fset. + NeedSyntax + + // NeedTypesInfo adds TypesInfo and Fset. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes + + // needInternalDepsErrors adds the internal deps errors field for use by gopls. + needInternalDepsErrors + + // NeedForTest adds ForTest. + // + // Tests must also be set on the context for this field to be populated. + NeedForTest + + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. + // Modifies CompiledGoFiles and Types, and has no effect on its own. + typecheckCgo + + // NeedModule adds Module. + NeedModule + + // NeedEmbedFiles adds EmbedFiles. + NeedEmbedFiles + + // NeedEmbedPatterns adds EmbedPatterns. + NeedEmbedPatterns + + // NeedTarget adds Target. + NeedTarget + + // Be sure to update loadmode_string.go when adding new items! +) + +const ( + // LoadFiles loads the name and file names for the initial packages. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // LoadImports loads the name, file names, and import mapping for the initial packages. + LoadImports = LoadFiles | NeedImports + + // LoadTypes loads exported type information for the initial packages. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // LoadSyntax loads typed syntax for the initial packages. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. + LoadAllSyntax = LoadSyntax | NeedDeps + + // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + // + //go:fix inline + NeedExportsFile = NeedExportFile +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// +// Calls to [Load] do not modify this struct. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // Cancelling the context may cause [Load] to abort and + // return an error. + Context context.Context + + // Logf is the logger for the config. + // If the user provides a logger, debug logging is enabled. + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the logger is nil, default to log.Printf. + Logf func(format string, args ...any) + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. + // If Env is nil, the current environment is used. + // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. To specify the setting of only + // a few variables, append to the current environment, as in: + // + // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") + // + Env []string + + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // Fset provides source position information for syntax trees and types. 
+	// If Fset is nil, Load will create and use a new fileset.
+	Fset *token.FileSet
+
+	// ParseFile is called to read and parse each file
+	// when preparing a package's type-checked syntax tree.
+	// It must be safe to call ParseFile simultaneously from multiple goroutines.
+	// If ParseFile is nil, the loader will use parser.ParseFile.
+	//
+	// ParseFile should parse the source from src and use filename only for
+	// recording position information.
+	//
+	// An application may supply a custom implementation of ParseFile
+	// to change the effective file contents or the behavior of the parser,
+	// or to modify the syntax tree. For example, selectively eliminating
+	// unwanted function bodies can significantly accelerate type checking.
+	ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)
+
+	// If Tests is set, the loader includes not just the packages
+	// matching a particular pattern but also any related test packages,
+	// including test-only variants of the package and the test executable.
+	//
+	// For example, when using the go command, loading "fmt" with Tests=true
+	// returns four packages, with IDs "fmt" (the standard package),
+	// "fmt [fmt.test]" (the package as compiled for the test),
+	// "fmt_test" (the test functions from source files in package fmt_test),
+	// and "fmt.test" (the test binary).
+	//
+	// In build systems with explicit names for tests,
+	// setting Tests may have no effect.
+	Tests bool
+
+	// Overlay is a mapping from absolute file paths to file contents.
+	//
+	// For each map entry, [Load] uses the alternative file
+	// contents provided by the overlay mapping instead of reading
+	// from the file system. This mechanism can be used to enable
+	// editor-integrated tools to correctly analyze the contents
+	// of modified but unsaved buffers, for example.
+	//
+	// The overlay mapping is passed to the build system's driver
+	// (see "The driver protocol") so that it too can report
+	// consistent package metadata about unsaved files. However,
+	// drivers may vary in their level of support for overlays.
+	Overlay map[string][]byte
+}
+
+// Load loads and returns the Go packages named by the given patterns.
+//
+// The cfg parameter specifies loading options; nil behaves the same as an empty [Config].
+//
+// The [Config.Mode] field is a set of bits that determine what kinds
+// of information should be computed and returned. Modes that require
+// more information tend to be slower. See [LoadMode] for details
+// and important caveats. Its zero value is equivalent to
+// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles].
+//
+// Each call to Load returns a new set of [Package] instances.
+// The Packages and their Imports form a directed acyclic graph.
+//
+// If the [NeedTypes] mode flag was set, each call to Load uses a new
+// [types.Importer], so [types.Object] and [types.Type] values from
+// different calls to Load must not be mixed as they will have
+// inconsistent notions of type identity.
+//
+// If any of the patterns was invalid as defined by the
+// underlying build system, Load returns an error.
+// It may return an empty list of packages without an error,
+// for instance for an empty expansion of a valid wildcard.
+// Errors associated with a particular package are recorded in the
+// corresponding Package's Errors list, and do not cause Load to
+// return an error. Clients may need to handle such errors before
+// proceeding with further analysis. The [PrintErrors] function is
+// provided for convenient display of all errors.
+func Load(cfg *Config, patterns ...string) ([]*Package, error) {
+	ld := newLoader(cfg)
+	response, external, err := defaultDriver(&ld.Config, patterns...)
+	if err != nil {
+		return nil, err
+	}
+
+	ld.sizes = types.SizesFor(response.Compiler, response.Arch)
+	if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 {
+		// Type size information is needed but unavailable.
+		if external {
+			// An external driver may fail to populate the Compiler/GOARCH fields,
+			// especially since they are relatively new (see #63700).
+			// Provide a sensible fallback in this case.
+			ld.sizes = types.SizesFor("gc", runtime.GOARCH)
+			if ld.sizes == nil { // gccgo-only arch
+				ld.sizes = types.SizesFor("gc", "amd64")
+			}
+		} else {
+			// Go list should never fail to deliver accurate size information.
+			// Reject the whole Load since the error is the same for every package.
+			return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q",
+				response.Compiler, response.Arch)
+		}
+	}
+
+	return ld.refine(response)
+}
+
+// defaultDriver is a driver that implements go/packages' fallback behavior.
+// It will first try an external driver, if one exists. If there's
+// no external driver, or the driver returns a response with NotHandled set,
+// defaultDriver will fall back to the go list driver.
+// The boolean result indicates whether an external driver handled the request.
+func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) {
+	const (
+		// windowsArgMax specifies the maximum command line length for
+		// the Windows' CreateProcess function.
+		windowsArgMax = 32767
+		// maxEnvSize is a very rough estimation of the maximum environment
+		// size of a user.
+		maxEnvSize = 16384
+		// safeArgMax specifies the maximum safe command line length to use
+		// by the underlying driver excl. the environment. We choose the Windows'
+		// ARG_MAX as the starting point because it's one of the lowest ARG_MAX
+		// constants out of the different supported platforms,
+		// e.g., https://www.in-ulm.de/~mascheck/various/argmax/#results.
+		safeArgMax = windowsArgMax - maxEnvSize
+	)
+	chunks, err := splitIntoChunks(patterns, safeArgMax)
+	if err != nil {
+		return nil, false, err
+	}
+
+	if driver := findExternalDriver(cfg); driver != nil {
+		response, err := callDriverOnChunks(driver, cfg, chunks)
+		if err != nil {
+			return nil, false, err
+		} else if !response.NotHandled {
+			return response, true, nil
+		}
+		// not handled: fall through
+	}
+
+	// go list fallback
+
+	// Write overlays once, as there are many calls
+	// to 'go list' (one per chunk plus others too).
+	overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
+	if err != nil {
+		return nil, false, err
+	}
+	defer cleanupOverlay()
+
+	var runner gocommand.Runner // (shared across many 'go list' calls)
+	driver := func(cfg *Config, patterns []string) (*DriverResponse, error) {
+		return goListDriver(cfg, &runner, overlayFile, patterns)
+	}
+	response, err := callDriverOnChunks(driver, cfg, chunks)
+	if err != nil {
+		return nil, false, err
+	}
+	return response, false, err
+}
+
+// splitIntoChunks chunks the slice so that the total number of characters
+// in a chunk is no longer than argMax.
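A hedged usage sketch of the entry point documented above, loading the standard `fmt` package with the LoadSyntax preset and handling errors the way the doc comment prescribes:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.LoadSyntax}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err) // pattern-level failure
	}
	// Per-package problems land in pkg.Errors rather than err.
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.Types.Name(), len(pkg.Syntax), "parsed files")
	}
}
```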
+func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { + if argMax <= 0 { + return nil, errors.New("failed to split patterns into chunks, negative safe argMax value") + } + var chunks [][]string + charsInChunk := 0 + nextChunkStart := 0 + for i, v := range patterns { + vChars := len(v) + if vChars > argMax { + // a single pattern is longer than the maximum safe ARG_MAX, hardly should happen + return nil, errors.New("failed to split patterns into chunks, a pattern is too long") + } + charsInChunk += vChars + 1 // +1 is for a whitespace between patterns that has to be counted too + if charsInChunk > argMax { + chunks = append(chunks, patterns[nextChunkStart:i]) + nextChunkStart = i + charsInChunk = vChars + } + } + // add the last chunk + if nextChunkStart < len(patterns) { + chunks = append(chunks, patterns[nextChunkStart:]) + } + return chunks, nil +} + +func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { + if len(chunks) == 0 { + return driver(cfg, nil) + } + responses := make([]*DriverResponse, len(chunks)) + errNotHandled := errors.New("driver returned NotHandled") + var g errgroup.Group + for i, chunk := range chunks { + g.Go(func() (err error) { + responses[i], err = driver(cfg, chunk) + if responses[i] != nil && responses[i].NotHandled { + err = errNotHandled + } + return err + }) + } + if err := g.Wait(); err != nil { + if errors.Is(err, errNotHandled) { + return &DriverResponse{NotHandled: true}, nil + } + return nil, err + } + return mergeResponses(responses...), nil +} + +func mergeResponses(responses ...*DriverResponse) *DriverResponse { + if len(responses) == 0 { + return nil + } + response := newDeduper() + response.dr.NotHandled = false + response.dr.Compiler = responses[0].Compiler + response.dr.Arch = responses[0].Arch + response.dr.GoVersion = responses[0].GoVersion + for _, v := range responses { + response.addAll(v) + } + return response.dr +} + +// A Package describes a loaded Go package. +// +// It also defines part of the JSON schema of [DriverResponse]. +// See the package documentation for an overview. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Dir is the directory associated with the package, if it exists. + // + // For packages listed by the go command, this is the directory containing + // the package files. + Dir string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // TypeErrors contains the subset of errors produced during type checking. + TypeErrors []types.Error + + // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that are suitable for type checking. 
+ // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // EmbedFiles lists the absolute file paths of the package's files + // embedded with go:embed. + EmbedFiles []string + + // EmbedPatterns lists the absolute file patterns of the package's + // files embedded with go:embed. + EmbedPatterns []string + + // IgnoredFiles lists source files that are not part of the package + // using the current build configuration but that might be part of + // the package using other build configurations. + IgnoredFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Target is the absolute install path of the .a file, for libraries, + // and of the executable file, for binaries. + Target string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Module is the module information for the package if it exists. + // + // Note: it may be missing for std and cmd; see Go issue #65816. + Module *Module + + // -- The following fields are not part of the driver JSON schema. -- + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + // + // Each call to [Load] returns a consistent set of type + // symbols, as defined by the comment at [types.Identical]. + // Avoid mixing type information from two or more calls to [Load]. + Types *types.Package `json:"-"` + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet `json:"-"` + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool `json:"-"` + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. + // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + // + // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are + // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles. + Syntax []*ast.File `json:"-"` + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info `json:"-"` + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes `json:"-"` + + // -- internal -- + + // ForTest is the package under test, if any. + ForTest string + + // depsErrors is the DepsErrors field from the go list response, if any. + depsErrors []*packagesinternal.PackageError +} + +// Module provides module information for a package. +// +// It also defines part of the JSON schema of [DriverResponse]. +// See the package documentation for an overview. 
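Given the Imports field above, a common client-side pattern is a dependencies-first walk of the package graph; x/tools also exports a Visit helper with similar behavior. A minimal sketch:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

// visitAll calls f once per reachable package, dependencies first.
func visitAll(roots []*packages.Package, f func(*packages.Package)) {
	seen := make(map[*packages.Package]bool)
	var visit func(*packages.Package)
	visit = func(p *packages.Package) {
		if seen[p] {
			return
		}
		seen[p] = true
		for _, imp := range p.Imports {
			visit(imp)
		}
		f(p)
	}
	for _, p := range roots {
		visit(p)
	}
}

func main() {
	pkgs, err := packages.Load(&packages.Config{Mode: packages.LoadImports}, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	visitAll(pkgs, func(p *packages.Package) { fmt.Println(p.ID) })
}
```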
+type Module struct { + Path string // module path + Version string // module version + Replace *Module // replaced by this module + Time *time.Time // time version was created + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module + Error *ModuleError // error loading module +} + +// ModuleError holds errors loading a module. +type ModuleError struct { + Err string // the error itself +} + +func init() { + packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError { + return p.(*Package).depsErrors + } + packagesinternal.TypecheckCgo = int(typecheckCgo) + packagesinternal.DepsErrors = int(needInternalDepsErrors) +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. +type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + EmbedFiles []string `json:",omitempty"` + EmbedPatterns []string `json:",omitempty"` + IgnoredFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + EmbedFiles: p.EmbedFiles, + EmbedPatterns: p.EmbedPatterns, + IgnoredFiles: p.IgnoredFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. 
+func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + EmbedFiles: flat.EmbedFiles, + EmbedPatterns: flat.EmbedPatterns, + IgnoredFiles: flat.IgnoredFiles, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + preds []*loaderPackage // packages that import this one + unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage // keyed by Package.ID + Config + sizes types.Sizes // non-nil if needed by mode + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // Config.Mode contains the implied mode (see impliedLoadMode). + // Implied mode contains all the fields we need the data for. + // In requestedMode there are the actually requested fields. + // We'll zero them out before returning packages to the user. + // This makes it easier for us to get the conditions where + // we need certain modes right. + requestedMode LoadMode +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + // If the user has provided a logger, use it. + ld.Config.Logf = cfg.Logf + } + if ld.Config.Logf == nil { + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the user has not provided a logger, default to log.Printf. + if debug { + ld.Config.Logf = log.Printf + } else { + ld.Config.Logf = func(format string, args ...any) {} + } + } + if ld.Config.Mode == 0 { + ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. + } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + // Save the actually requested fields. We'll zero them out before returning packages to the user. + ld.requestedMode = ld.Mode + ld.Mode = impliedLoadMode(ld.Mode) + + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. 
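A small round-trip showing the flattened wire form these two methods define: Imports collapse to a {path: ID} map on the way out, and decoded packages come back with stub imports carrying only an ID:

```go
package main

import (
	"encoding/json"
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	p := &packages.Package{
		ID:      "example.com/a", // made-up package
		PkgPath: "example.com/a",
		Imports: map[string]*packages.Package{"fmt": {ID: "fmt"}},
	}
	data, err := json.Marshal(p) // uses the MarshalJSON method above
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))

	var q packages.Package
	if err := json.Unmarshal(data, &q); err != nil {
		panic(err)
	}
	fmt.Println(q.Imports["fmt"].ID) // stub import: only ID is set
}
```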
+		if ld.ParseFile == nil {
+			ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+				// We implicitly promise to keep doing ast.Object resolution. :(
+				const mode = parser.AllErrors | parser.ParseComments
+				return parser.ParseFile(fset, filename, src, mode)
+			}
+		}
+	}
+
+	return ld
+}
+
+// refine connects the supplied packages into a graph and then adds type
+// and syntax information as requested by the LoadMode.
+func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
+	roots := response.Roots
+	rootMap := make(map[string]int, len(roots))
+	for i, root := range roots {
+		rootMap[root] = i
+	}
+	ld.pkgs = make(map[string]*loaderPackage)
+	// first pass, fixup and build the map and roots
+	var initial = make([]*loaderPackage, len(roots))
+	for _, pkg := range response.Packages {
+		rootIndex := -1
+		if i, found := rootMap[pkg.ID]; found {
+			rootIndex = i
+		}
+
+		// Overlays can invalidate export data.
+		// TODO(matloob): make this check fine-grained based on dependencies on overlaid files
+		exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
+		// This package needs type information if the caller requested types and the package is
+		// either a root, or it's a non-root and the user requested dependencies ...
+		needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
+		// This package needs source if the call requested source (or types info, which implies source)
+		// and the package is either a root, or it's a non-root and the user requested dependencies ...
+		needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
+			// ... or if we need types and the exportData is invalid. We fall back to (incompletely)
+			// typechecking packages from source if they fail to compile.
+			(ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
+		lpkg := &loaderPackage{
+			Package:   pkg,
+			needtypes: needtypes,
+			needsrc:   needsrc,
+			goVersion: response.GoVersion,
+		}
+		ld.pkgs[lpkg.ID] = lpkg
+		if rootIndex >= 0 {
+			initial[rootIndex] = lpkg
+			lpkg.initial = true
+		}
+	}
+	for i, root := range roots {
+		if initial[i] == nil {
+			return nil, fmt.Errorf("root package %v is missing", root)
+		}
+	}
+
+	// Materialize the import graph if it is needed (NeedImports),
+	// or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}).
+	var leaves []*loaderPackage // packages with no unfinished successors
+	if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
+		const (
+			white = 0 // new
+			grey  = 1 // in progress
+			black = 2 // complete
+		)
+
+		// visit traverses the import graph, depth-first,
+		// and materializes the graph as Packages.Imports.
+		//
+		// Valid imports are saved in the Packages.Import map.
+		// Invalid imports (cycles and missing nodes) are saved in the importErrors map.
+		// Thus, even in the presence of both kinds of errors,
+		// the Import graph remains a DAG.
+		//
+		// visit returns whether the package needs src or has a transitive
+		// dependency on a package that does. These are the only packages
+		// for which we load source code.
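The white/grey/black scheme described above is standard depth-first cycle detection: grey marks nodes currently on the stack, so meeting a grey neighbor means a cycle. A stripped-down sketch on a made-up two-node cycle:

```go
package main

import "fmt"

const (
	white = 0 // new
	grey  = 1 // in progress
	black = 2 // complete
)

func main() {
	graph := map[string][]string{"a": {"b"}, "b": {"a"}} // made-up cycle
	color := map[string]int{}
	var visit func(n string)
	visit = func(n string) {
		color[n] = grey
		for _, m := range graph[n] {
			switch color[m] {
			case grey:
				fmt.Println("import cycle through", m)
			case white:
				visit(m)
			}
		}
		color[n] = black
	}
	visit("a")
}
```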
+		var stack []*loaderPackage
+		var visit func(from, lpkg *loaderPackage) bool
+		visit = func(from, lpkg *loaderPackage) bool {
+			if lpkg.color == grey {
+				panic("internal error: grey node")
+			}
+			if lpkg.color == white {
+				lpkg.color = grey
+				stack = append(stack, lpkg) // push
+				stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
+				lpkg.Imports = make(map[string]*Package, len(stubs))
+				for importPath, ipkg := range stubs {
+					var importErr error
+					imp := ld.pkgs[ipkg.ID]
+					if imp == nil {
+						// (includes package "C" when DisableCgo)
+						importErr = fmt.Errorf("missing package: %q", ipkg.ID)
+					} else if imp.color == grey {
+						importErr = fmt.Errorf("import cycle: %s", stack)
+					}
+					if importErr != nil {
+						if lpkg.importErrors == nil {
+							lpkg.importErrors = make(map[string]error)
+						}
+						lpkg.importErrors[importPath] = importErr
+						continue
+					}
+
+					if visit(lpkg, imp) {
+						lpkg.needsrc = true
+					}
+					lpkg.Imports[importPath] = imp.Package
+				}
+
+				// -- postorder --
+
+				// Complete type information is required for the
+				// immediate dependencies of each source package.
+				if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
+					for _, ipkg := range lpkg.Imports {
+						ld.pkgs[ipkg.ID].needtypes = true
+					}
+				}
+
+				// NeedTypeSizes causes TypeSizes to be set even
+				// on packages for which types aren't needed.
+				if ld.Mode&NeedTypesSizes != 0 {
+					lpkg.TypesSizes = ld.sizes
+				}
+
+				// Add packages with no imports directly to the queue of leaves.
+				if len(lpkg.Imports) == 0 {
+					leaves = append(leaves, lpkg)
+				}
+
+				stack = stack[:len(stack)-1] // pop
+				lpkg.color = black
+			}
+
+			// Add edge from predecessor.
+			if from != nil {
+				from.unfinishedSuccs.Add(+1) // incref
+				lpkg.preds = append(lpkg.preds, from)
+			}
+
+			return lpkg.needsrc
+		}
+
+		// For each initial package, create its import DAG.
+		for _, lpkg := range initial {
+			visit(nil, lpkg)
+		}
+
+	} else {
+		// !NeedImports: drop the stub (ID-only) import packages
+		// that we are not even going to try to resolve.
+		for _, lpkg := range initial {
+			lpkg.Imports = nil
+		}
+	}
+
+	// Load type data and syntax if needed, starting at
+	// the initial packages (roots of the import DAG).
+	if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
+
+		// We avoid using g.SetLimit to limit concurrency as
+		// it makes g.Go stop accepting work, which prevents
+		// workers from enqueuing, and thus finishing, and thus
+		// allowing the group to make progress: deadlock.
+		//
+		// Instead we use the ioLimit and cpuLimit semaphores.
+		g, _ := errgroup.WithContext(ld.Context)
+
+		// enqueue adds a package to the type-checking queue.
+		// It must have no unfinished successors.
+		var enqueue func(*loaderPackage)
+		enqueue = func(lpkg *loaderPackage) {
+			g.Go(func() error {
+				// Parse and type-check.
+				ld.loadPackage(lpkg)
+
+				// Notify each waiting predecessor,
+				// and enqueue it when it becomes a leaf.
+				for _, pred := range lpkg.preds {
+					if pred.unfinishedSuccs.Add(-1) == 0 { // decref
+						enqueue(pred)
+					}
+				}
+
+				return nil
+			})
+		}
+
+		// Load leaves first, adding new packages
+		// to the queue as they become leaves.
+		for _, leaf := range leaves {
+			enqueue(leaf)
+		}
+
+		if err := g.Wait(); err != nil {
+			return nil, err // cancelled
+		}
+	}
+
+	// If the context is done, return its error and
+	// throw out [likely] incomplete packages.
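The leaf-first scheduling above reduces to a small pattern: every node counts its unfinished dependencies, and whichever worker finishes a node's last dependency enqueues that node. A self-contained sketch on a made-up three-node chain, using a WaitGroup instead of errgroup:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// node is a stripped-down analogue of loaderPackage: preds point at
// dependents, unfinished counts direct dependencies not yet processed.
type node struct {
	name       string
	preds      []*node
	unfinished atomic.Int32
}

func main() {
	// c depends on b, which depends on a.
	a := &node{name: "a"}
	b := &node{name: "b"}
	c := &node{name: "c"}
	a.preds = []*node{b}
	b.preds = []*node{c}
	b.unfinished.Store(1)
	c.unfinished.Store(1)

	var wg sync.WaitGroup
	var process func(*node)
	process = func(n *node) {
		defer wg.Done()
		fmt.Println("processing", n.name)
		for _, p := range n.preds {
			if p.unfinished.Add(-1) == 0 { // last dependency finished
				wg.Add(1)
				go process(p)
			}
		}
	}
	wg.Add(1)
	go process(a) // start at the leaf
	wg.Wait()
}
```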
+ if err := ld.Context.Err(); err != nil { + return nil, err + } + + result := make([]*Package, len(initial)) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, + // to catch programs that use more than they request. + if ld.requestedMode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.requestedMode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + ld.pkgs[i].IgnoredFiles = nil + } + if ld.requestedMode&NeedEmbedFiles == 0 { + ld.pkgs[i].EmbedFiles = nil + } + if ld.requestedMode&NeedEmbedPatterns == 0 { + ld.pkgs[i].EmbedPatterns = nil + } + if ld.requestedMode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.requestedMode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.requestedMode&NeedExportFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.requestedMode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].IllTyped = false + } + if ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 { + ld.pkgs[i].Fset = nil + } + if ld.requestedMode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.requestedMode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.requestedMode&NeedModule == 0 { + ld.pkgs[i].Module = nil + } + } + + return result, nil +} + +// loadPackage loads/parses/typechecks the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Start shutting down if the context is done and do not load + // source or export data files. + // Packages that import this one will have ld.Context.Err() != nil. + // ld.Context.Err() will be returned later by refine. + if ld.Context.Err() != nil { + return + } + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // (Hence this return is after the Types assignment.) + // The Diamond test exercises this case. + if !lpkg.needtypes && !lpkg.needsrc { + return + } + + // TODO(adonovan): this condition looks wrong: + // I think it should be lpkg.needtypes && !lpg.needsrc, + // so that NeedSyntax without NeedTypes can be satisfied by export data. + if !lpkg.needsrc { + if err := ld.loadFromExportData(lpkg); err != nil { + lpkg.Errors = append(lpkg.Errors, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, // e.g. can't find/open/parse export data + }) + } + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. 
+ var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + lpkg.TypeErrors = append(lpkg.TypeErrors, err) + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + // If the go command on the PATH is newer than the runtime, + // then the go/{scanner,ast,parser,types} packages from the + // standard library may be unable to process the files + // selected by go list. + // + // There is currently no way to downgrade the effective + // version of the go command (see issue 52078), so we proceed + // with the newer go command but, in case of parse or type + // errors, we emit an additional diagnostic. + // + // See: + // - golang.org/issue/52078 (flag to set release tags) + // - golang.org/issue/50825 (gopls legacy version support) + // - golang.org/issue/55883 (go/packages confusing error) + // + // Should we assert a hard minimum of (currently) go1.16 here? + var runtimeVersion int + if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion { + defer func() { + if len(lpkg.Errors) > 0 { + appendError(Error{ + Pos: "-", + Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion), + Kind: UnknownError, + }) + } + }() + } + + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { + // The config requested loading sources and types, but sources are missing. + // Add an error to the package and fall back to loading from export data. + appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) + _ = ld.loadFromExportData(lpkg) // ignore any secondary errors + + return // can't get syntax trees for this package + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 { + return + } + + // Start shutting down if the context is done and do not type check. + // Packages that import this one will have ld.Context.Err() != nil. + // ld.Context.Err() will be returned later by refine. + if ld.Context.Err() != nil { + return + } + + // Populate TypesInfo only if needed, as it + // causes the type checker to work much harder. 
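+	// A hedged aside (editorial): callers of go/types outside this
+	// package pay the same cost rule; allocating only the Info maps
+	// that will be read keeps the checker from recording unused facts.
+	// A minimal sketch, assuming the standard go/types API:
+	//
+	//	info := &types.Info{
+	//		Defs: make(map[*ast.Ident]types.Object), // definitions only
+	//		Uses: make(map[*ast.Ident]types.Object), // and uses
+	//	}
+	//	// pkg, err := (&types.Config{}).Check("p", fset, files, info)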
+	if ld.Config.Mode&NeedTypesInfo != 0 {
+		lpkg.TypesInfo = &types.Info{
+			Types:        make(map[ast.Expr]types.TypeAndValue),
+			Defs:         make(map[*ast.Ident]types.Object),
+			Uses:         make(map[*ast.Ident]types.Object),
+			Implicits:    make(map[ast.Node]types.Object),
+			Instances:    make(map[*ast.Ident]types.Instance),
+			Scopes:       make(map[ast.Node]*types.Scope),
+			Selections:   make(map[*ast.SelectorExpr]*types.Selection),
+			FileVersions: make(map[*ast.File]string),
+		}
+	}
+	lpkg.TypesSizes = ld.sizes
+
+	importer := importerFunc(func(path string) (*types.Package, error) {
+		if path == "unsafe" {
+			return types.Unsafe, nil
+		}
+
+		// The imports map is keyed by import path.
+		ipkg := lpkg.Imports[path]
+		if ipkg == nil {
+			if err := lpkg.importErrors[path]; err != nil {
+				return nil, err
+			}
+			// There was skew between the metadata and the
+			// import declarations, likely due to an edit
+			// race, or because the ParseFile feature was
+			// used to supply alternative file contents.
+			return nil, fmt.Errorf("no metadata for %s", path)
+		}
+
+		if ipkg.Types != nil && ipkg.Types.Complete() {
+			return ipkg.Types, nil
+		}
+		log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg)
+		panic("unreachable")
+	})
+
+	// type-check
+	tc := &types.Config{
+		Importer: importer,
+
+		// Type-check bodies of functions only in initial packages.
+		// Example: for import graph A->B->C and initial packages {A,C},
+		// we can ignore function bodies in B.
+		IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
+
+		Error: appendError,
+		Sizes: ld.sizes, // may be nil
+	}
+	if lpkg.Module != nil && lpkg.Module.GoVersion != "" {
+		tc.GoVersion = "go" + lpkg.Module.GoVersion
+	}
+	if (ld.Mode & typecheckCgo) != 0 {
+		if !typesinternal.SetUsesCgo(tc) {
+			appendError(Error{
+				Msg:  "typecheckCgo requires Go 1.15+",
+				Kind: ListError,
+			})
+			return
+		}
+	}
+
+	// Type-checking is CPU intensive.
+	cpuLimit <- unit{}            // acquire a token
+	defer func() { <-cpuLimit }() // release a token
+
+	typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
+	lpkg.importErrors = nil // no longer needed
+
+	// In go/types go1.21 and go1.22, Checker.Files failed fast with a
+	// "too new" error, without calling tc.Error and without
+	// proceeding to type-check the package (#66525).
+	// We rely on the runtimeVersion error to give the suggested remedy.
+	if typErr != nil && len(lpkg.Errors) == 0 && len(lpkg.Syntax) > 0 {
+		if msg := typErr.Error(); strings.HasPrefix(msg, "package requires newer Go version") {
+			appendError(types.Error{
+				Fset: ld.Fset,
+				Pos:  lpkg.Syntax[0].Package,
+				Msg:  msg,
+			})
+		}
+	}
+
+	// If !Cgo, the type-checker uses FakeImportC mode, so
+	// it doesn't invoke the importer for import "C",
+	// nor report an error for the import,
+	// or for any undefined C.f reference.
+	// We must detect this explicitly and correctly
+	// mark the package as IllTyped (by reporting an error).
+	// TODO(adonovan): if these errors are annoying,
+	// we could just set IllTyped quietly.
+	if tc.FakeImportC {
+	outer:
+		for _, f := range lpkg.Syntax {
+			for _, imp := range f.Imports {
+				if imp.Path.Value == `"C"` {
+					err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
+					appendError(err)
+					break outer
+				}
+			}
+		}
+	}
+
+	// If types.Checker.Files had an error that was unreported,
+	// make sure to report the unknown error so the package is illTyped.
+	if typErr != nil && len(lpkg.Errors) == 0 {
+		appendError(typErr)
+	}
+
+	// Record accumulated errors.
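+	// (Editorial note.) The loop below inspects only direct imports,
+	// yet ill-typedness propagates transitively: leaves are finished
+	// first, so an indirect bad dependency has already marked some
+	// direct import by the time this runs. For A -> B -> C with a
+	// type error in C:
+	//
+	//	C: len(C.Errors) > 0    => C.IllTyped = true
+	//	B: import C is IllTyped => B.IllTyped = true
+	//	A: import B is IllTyped => A.IllTyped = true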
+	illTyped := len(lpkg.Errors) > 0
+	if !illTyped {
+		for _, imp := range lpkg.Imports {
+			if imp.IllTyped {
+				illTyped = true
+				break
+			}
+		}
+	}
+	lpkg.IllTyped = illTyped
+}
+
+// An importerFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls or CPU threads per process.
+var (
+	ioLimit  = make(chan unit, 20)
+	cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))
+)
+
+func (ld *loader) parseFile(filename string) (*ast.File, error) {
+	ld.parseCacheMu.Lock()
+	v, ok := ld.parseCache[filename]
+	if ok {
+		// cache hit
+		ld.parseCacheMu.Unlock()
+		<-v.ready
+	} else {
+		// cache miss
+		v = &parseValue{ready: make(chan struct{})}
+		ld.parseCache[filename] = v
+		ld.parseCacheMu.Unlock()
+
+		var src []byte
+		for f, contents := range ld.Config.Overlay {
+			// TODO(adonovan): Inefficient for large overlays.
+			// Do an exact name-based map lookup
+			// (for nonexistent files) followed by a
+			// FileID-based map lookup (for existing ones).
+			if sameFile(f, filename) {
+				src = contents
+				break
+			}
+		}
+		var err error
+		if src == nil {
+			ioLimit <- unit{} // acquire a token
+			src, err = os.ReadFile(filename)
+			<-ioLimit // release a token
+		}
+		if err != nil {
+			v.err = err
+		} else {
+			// Parsing is CPU intensive.
+			cpuLimit <- unit{} // acquire a token
+			v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
+			<-cpuLimit // release a token
+		}
+
+		close(v.ready)
+	}
+	return v.f, v.err
+}
+
+// parseFiles reads and parses the Go source files and returns the ASTs
+// of the ones that could be at least partially parsed, along with a
+// list of I/O and parse errors encountered.
+//
+// Because files are scanned in parallel, the token.Pos
+// positions of the resulting ast.Files are not ordered.
+func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
+	var (
+		n      = len(filenames)
+		parsed = make([]*ast.File, n)
+		errors = make([]error, n)
+	)
+	var g errgroup.Group
+	for i, filename := range filenames {
+		// This creates goroutines unnecessarily in the
+		// cache-hit case, but that case is uncommon.
+		g.Go(func() error {
+			parsed[i], errors[i] = ld.parseFile(filename)
+			return nil
+		})
+	}
+	g.Wait()
+
+	// Eliminate nils, preserving order.
+	var o int
+	for _, f := range parsed {
+		if f != nil {
+			parsed[o] = f
+			o++
+		}
+	}
+	parsed = parsed[:o]
+
+	o = 0
+	for _, err := range errors {
+		if err != nil {
+			errors[o] = err
+			o++
+		}
+	}
+	errors = errors[:o]
+
+	return parsed, errors
+}
+
+// sameFile returns true if x and y have the same basename and denote
+// the same file.
+func sameFile(x, y string) bool {
+	if x == y {
+		// It could be the case that y doesn't exist.
+		// For instance, it may be an overlay file that
+		// hasn't been written to disk. To handle that case
+		// let x == y through. (We added the exact absolute path
+		// string to the CompiledGoFiles list, so the unwritten
+		// overlay case implies x==y.)
+		return true
+	}
+	if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)
+		if xi, err := os.Stat(x); err == nil {
+			if yi, err := os.Stat(y); err == nil {
+				return os.SameFile(xi, yi)
+			}
+		}
+	}
+	return false
+}
+
+// loadFromExportData ensures that type information is present for the specified
+// package, loading it from an export data file on the first request.
+// On success it sets lpkg.Types to a new Package. +func (ld *loader) loadFromExportData(lpkg *loaderPackage) error { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the lpkg.Types field and the + // types.Package it points to, for each loaderPackage in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if _, ok := view["go.shape"]; ok { + // Account for the pseudopackage "go.shape" that gets + // created by generic code. 
+ viewLen++ + } + if viewLen != len(view) { + log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath) + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + return nil +} + +// impliedLoadMode returns loadMode with its dependencies. +func impliedLoadMode(loadMode LoadMode) LoadMode { + if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 { + // All these things require knowing the import graph. + loadMode |= NeedImports + } + if loadMode&NeedTypes != 0 { + // Types require the GoVersion from Module. + loadMode |= NeedModule + } + + return loadMode +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 +} + +type unit struct{} diff --git a/openshift/vendor/golang.org/x/tools/go/packages/visit.go b/openshift/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 0000000000..df14ffd94d --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. +func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. +func PrintErrors(pkgs []*Package) int { + var n int + errModules := make(map[*Module]bool) + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + + // Print pkg.Module.Error once if present. + mod := pkg.Module + if mod != nil && mod.Error != nil && !errModules[mod] { + errModules[mod] = true + fmt.Fprintln(os.Stderr, mod.Error.Err) + n++ + } + }) + return n +} diff --git a/openshift/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/openshift/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 0000000000..d3c2913bef --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,817 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. 
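+//
+// For example (an editorial sketch using the For and Object functions
+// defined below): one process can encode an object,
+//
+//	path, _ := objectpath.For(obj)
+//
+// transmit the resulting string, and a receiving process that has
+// type-checked the same package can recover an equivalent object:
+//
+//	obj2, _ := objectpath.Object(pkg, path)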
+// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// +// type A struct{ X int } +// type B A +// +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. +package objectpath + +import ( + "fmt" + "go/types" + "strconv" + "strings" + + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" +) + +// TODO(adonovan): think about generic aliases. + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. +type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. +// We classify these operators by their type: +// +// PO package->object Package.Scope.Lookup +// OT object->type Object.Type +// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa] +// TO type->object Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +// objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +// - The only PO operator is Package.Scope.Lookup, which requires an identifier. +// - The only OT operator is Object.Type, +// which we encode as '.' because dot cannot appear in an identifier. +// - The TT operators are encoded as [EKPRUTrCa]; +// two of these ({,Recv}TypeParams) require an integer operand, +// which is encoded as a string of decimal digits. +// - The TO operators are encoded as [AFMO]; +// three of these (At,Field,Method) require an integer operand, +// which is encoded as a string of decimal digits. +// These indices are stable across different representations +// of the same package, even source and export data. +// The indices used are implementation specific and may not correspond to +// the argument to the go/types function. +// +// In the example below, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +// p.Lookup("T") T +// .Type().Underlying().Method(0). f +// .Type().Results().At(1) b +// .Type().Field(0) X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. +const ( + // object->type operators + opType = '.' 
// .Type() (Object) + + // type->type operators + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + opRhs = 'a' // .Rhs() (Alias) + + // type->object operators + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) +) + +// For is equivalent to new(Encoder).For(obj). +// +// It may be more efficient to reuse a single Encoder across several calls. +func For(obj types.Object) (Path, error) { + return new(Encoder).For(obj) +} + +// An Encoder amortizes the cost of encoding the paths of multiple objects. +// The zero value of an Encoder is ready to use. +type Encoder struct { + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects +} + +// For returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. +// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. +// +// The set of objects accessible from a package's Scope depends on +// whether the package was produced by type-checking syntax, or +// reading export data; the latter may have a smaller Scope since +// export data trims objects that are not reachable from an exported +// declaration. For example, the For function will return a path for +// an exported method of an unexported type that is not reachable +// from any public declaration; this path will cause the Object +// function to fail if called on a package loaded from export data. +// TODO(adonovan): is this a bug or feature? Should this package +// compute accessibility in the same way? +// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func (enc *Encoder) For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. 
+ // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. + if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.TypeName: + if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok { + // With the exception of type parameters, only package-level type names + // have a path. + return "", fmt.Errorf("no path for %v", obj) + } + case *types.Const, // Only package-level constants have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. + return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. + // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. + if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + + if path, ok := enc.concreteMethod(obj); ok { + // Fast path for concrete methods that avoids looping over scope. + return path, nil + } + + default: + panic(obj) + } + + // 4. Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. + // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. + empty := make([]byte, 0, 48) // initial space + objs := enc.scopeObjects(scope) + for _, o := range objs { + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, o.Name()...) 
+ path = append(path, opType) + + T := o.Type() + if alias, ok := T.(*types.Alias); ok { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil { + return Path(r), nil + } + if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil { + return Path(r), nil + } + + } else if tname.IsAlias() { + // legacy alias + if r := find(obj, T, path); r != nil { + return Path(r), nil + } + + } else if named, ok := T.(*types.Named); ok { + // defined (named) type + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil { + return Path(r), nil + } + if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. + for _, o := range objs { + path := append(empty, o.Name()...) + if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType)); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. + if T, ok := types.Unalias(o.Type()).(*types.Named); ok { + path = append(path, opType) + // The method index here is always with respect + // to the underlying go/types data structures, + // which ultimately derives from source order + // and must be preserved by export data. + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType)); r != nil { + return Path(r), nil + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// concreteMethod returns the path for meth, which must have a non-nil receiver. +// The second return value indicates success and may be false if the method is +// an interface method or if it is an instantiated method. +// +// This function is just an optimization that avoids the general scope walking +// approach. You are expected to fall back to the general approach if this +// function fails. +func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { + // Concrete methods can only be declared on package-scoped named types. For + // that reason we can skip the expensive walk over the package scope: the + // path will always be package -> named type -> method. We can trivially get + // the type name from the receiver, and only have to look over the type's + // methods to find the method index. + // + // Methods on generic types require special consideration, however. Consider + // the following package: + // + // L1: type S[T any] struct{} + // L2: func (recv S[A]) Foo() { recv.Bar() } + // L3: func (recv S[B]) Bar() { } + // L4: type Alias = S[int] + // L5: func _[T any]() { var s S[int]; s.Foo() } + // + // The receivers of methods on generic types are instantiations. L2 and L3 + // instantiate S with the type-parameters A and B, which are scoped to the + // respective methods. L4 and L5 each instantiate S with int. Each of these + // instantiations has its own method set, full of methods (and thus objects) + // with receivers whose types are the respective instantiations. 
In other + // words, we have + // + // S[A].Foo, S[A].Bar + // S[B].Foo, S[B].Bar + // S[int].Foo, S[int].Bar + // + // We may thus be trying to produce object paths for any of these objects. + // + // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo + // and S.Bar, which are the paths that this function naturally produces. + // + // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that + // don't correspond to the origin methods. For S[int], this is significant. + // The most precise object path for S[int].Foo, for example, is Alias.Foo, + // not S.Foo. Our function, however, would produce S.Foo, which would + // resolve to a different object. + // + // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are + // still the correct paths, since only the origin methods have meaningful + // paths. But this is likely only true for trivial cases and has edge cases. + // Since this function is only an optimization, we err on the side of giving + // up, deferring to the slower but definitely correct algorithm. Most users + // of objectpath will only be giving us origin methods, anyway, as referring + // to instantiated methods is usually not useful. + + if meth.Origin() != meth { + return "", false + } + + _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + if named == nil { + return "", false + } + + if types.IsInterface(named) { + // Named interfaces don't have to be package-scoped + // + // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface + // methods, too, I think. + return "", false + } + + // Preallocate space for the name, opType, opMethod, and some digits. + name := named.Obj().Name() + path := make([]byte, 0, len(name)+8) + path = append(path, name...) + path = append(path, opType) + + // Method indices are w.r.t. the go/types data structures, + // ultimately deriving from source order, + // which is preserved by export data. + for i := 0; i < named.NumMethods(); i++ { + if named.Method(i) == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + + // Due to golang/go#59944, go/types fails to associate the receiver with + // certain methods on cgo types. + // + // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go + // versions gopls supports. + return "", false + // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) +} + +// find finds obj within type T, returning the path to it, or nil if not found. +// +// The seen map is used to short circuit cycles through type parameters. If +// nil, it will be allocated as necessary. +// +// The seenMethods map is used internally to short circuit cycles through +// interface methods, such as occur in the following example: +// +// type I interface { f() interface{I} } +// +// See golang/go#68046 for details. +func find(obj types.Object, T types.Type, path []byte) []byte { + return (&finder{obj: obj}).find(T, path) +} + +// finder closes over search state for a call to find. 
+type finder struct { + obj types.Object // the sought object + seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters + seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces +} + +func (f *finder) find(T types.Type, path []byte) []byte { + switch T := T.(type) { + case *types.Alias: + return f.find(types.Unalias(T), path) + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. + return nil + case *types.Pointer: + return f.find(T.Elem(), append(path, opElem)) + case *types.Slice: + return f.find(T.Elem(), append(path, opElem)) + case *types.Array: + return f.find(T.Elem(), append(path, opElem)) + case *types.Chan: + return f.find(T.Elem(), append(path, opElem)) + case *types.Map: + if r := f.find(T.Key(), append(path, opKey)); r != nil { + return r + } + return f.find(T.Elem(), append(path, opElem)) + case *types.Signature: + if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil { + return r + } + if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil { + return r + } + if r := f.find(T.Params(), append(path, opParams)); r != nil { + return r + } + return f.find(T.Results(), append(path, opResults)) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + fld := T.Field(i) + path2 := appendOpArg(path, opField, i) + if fld == f.obj { + return path2 // found field var + } + if r := f.find(fld.Type(), append(path2, opType)); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == f.obj { + return path2 // found param/result var + } + if r := f.find(v.Type(), append(path2, opType)); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + if f.seenMethods[m] { + return nil + } + path2 := appendOpArg(path, opMethod, i) + if m == f.obj { + return path2 // found interface method + } + if f.seenMethods == nil { + f.seenMethods = make(map[*types.Func]bool) + } + f.seenMethods[m] = true + if r := f.find(m.Type(), append(path2, opType)); r != nil { + return r + } + } + return nil + case *types.TypeParam: + name := T.Obj() + if f.seenTParamNames[name] { + return nil + } + if name == f.obj { + return append(path, opObj) + } + if f.seenTParamNames == nil { + f.seenTParamNames = make(map[*types.TypeName]bool) + } + f.seenTParamNames[name] = true + if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil { + return r + } + return nil + } + panic(T) +} + +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte { + return (&finder{obj: obj}).findTypeParam(list, path, op) +} + +func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte { + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + path2 := appendOpArg(path, op, i) + if r := f.find(tparam, path2); r != nil { + return r + } + } + return nil +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + pathstr := string(p) + if pathstr == "" { + return nil, fmt.Errorf("empty path") + } + + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." 
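+	// Worked example (editorial), reusing the path from the package
+	// documentation: for p = "T.UM0.RA1.F0", pkgobj is "T" and suffix
+	// is ".UM0.RA1.F0", which the loop below decodes as
+	//
+	//	.    obj.Type()       (the type of T)
+	//	U    .Underlying()    (the interface)
+	//	M0   .Method(0)       (the func f)
+	//	.    obj.Type()       (f's signature)
+	//	R    .Results()       (the result tuple)
+	//	A1   .At(1)           (the var b)
+	//	.    obj.Type()       (b's struct type)
+	//	F0   .Field(0)        (the field X)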
+ } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abstraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Named,Signature} + type hasTypeParams interface { + TypeParams() *types.TypeParamList + } + // abstraction of *types.{Alias,Named,TypeParam} + type hasObj interface { + Obj() *types.TypeName + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' (the only object->type operation), + // followed by optional type->type operations, + // then a type->object operation. + // The cycle then repeats. + var t types.Type + for suffix != "" { + code := suffix[0] + suffix = suffix[1:] + + // Codes [AFMTr] have an integer operand. + var index int + switch code { + case opAt, opField, opMethod, opTypeParam, opRecvTypeParam: + rest := strings.TrimLeft(suffix, "0123456789") + numerals := suffix[:len(suffix)-len(rest)] + suffix = rest + i, err := strconv.Atoi(numerals) + if err != nil { + return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) + } + index = int(i) + case opObj: + // no operand + default: + // The suffix must end with a type->object operation. + if suffix == "" { + return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) + } + } + + if code == opType { + if t != nil { + return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) + } + t = obj.Type() + obj = nil + continue + } + + if t == nil { + return nil, fmt.Errorf("invalid path: code %q in object context", code) + } + + // Inv: t != nil, obj == nil + + t = types.Unalias(t) + switch code { + case opElem: + hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) + } + t = hasElem.Elem() + + case opKey: + mapType, ok := t.(*types.Map) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) + } + t = mapType.Key() + + case opParams: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Params() + + case opResults: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Results() + + case opUnderlying: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) + } + t = named.Underlying() + + case opRhs: + if alias, ok := t.(*types.Alias); ok { + t = aliases.Rhs(alias) + } else if false && aliases.Enabled() { + // The Enabled check is too expensive, so for now we + // simply assume that aliases are not enabled. + // TODO(adonovan): replace with "if true {" when go1.24 is assured. 
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) + } + + case opTypeParam: + hasTypeParams, ok := t.(hasTypeParams) // Named, Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) + } + tparams := hasTypeParams.TypeParams() + if n := tparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = tparams.At(index) + + case opRecvTypeParam: + sig, ok := t.(*types.Signature) // Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + rtparams := sig.RecvTypeParams() + if n := rtparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = rtparams.At(index) + + case opConstraint: + tparam, ok := t.(*types.TypeParam) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) + } + t = tparam.Constraint() + + case opAt: + tuple, ok := t.(*types.Tuple) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) + } + if n := tuple.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + obj = tuple.At(index) + t = nil + + case opField: + structType, ok := t.(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) + } + if n := structType.NumFields(); index >= n { + return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) + } + obj = structType.Field(index) + t = nil + + case opMethod: + switch t := t.(type) { + case *types.Interface: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) // Id-ordered + + case *types.Named: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) + + default: + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) + } + t = nil + + case opObj: + hasObj, ok := t.(hasObj) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) + } + obj = hasObj.Obj() + t = nil + + default: + return nil, fmt.Errorf("invalid path: unknown code %q", code) + } + } + + if obj == nil { + panic(p) // path does not end in an object-valued operator + } + + if obj.Pkg() != pkg { + return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) + } + + return obj, nil // success +} + +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. +func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { + m := enc.scopeMemo + if m == nil { + m = make(map[*types.Scope][]types.Object) + enc.scopeMemo = m + } + objs, ok := m[scope] + if !ok { + names := scope.Names() // allocates and sorts + objs = make([]types.Object, len(names)) + for i, name := range names { + objs[i] = scope.Lookup(name) + } + m[scope] = objs + } + return objs +} diff --git a/openshift/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/openshift/vendor/golang.org/x/tools/go/types/typeutil/callee.go new file mode 100644 index 0000000000..5f10f56cba --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,85 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + _ "unsafe" // for linkname +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +// +// Functions and methods may potentially have type parameters. +// +// Note: for calls of instantiated functions and methods, Callee returns +// the corresponding generic function or method on the generic type. +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + obj := info.Uses[usedIdent(info, call.Fun)] + if obj == nil { + return nil + } + if _, ok := obj.(*types.TypeName); ok { + return nil + } + return obj +} + +// StaticCallee returns the target (function or method) of a static function +// call, if any. It returns nil for calls to builtins. +// +// Note: for calls of instantiated functions and methods, StaticCallee returns +// the corresponding generic function or method on the generic type. +func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + obj := info.Uses[usedIdent(info, call.Fun)] + fn, _ := obj.(*types.Func) + if fn == nil || interfaceMethod(fn) { + return nil + } + return fn +} + +// usedIdent is the implementation of [internal/typesinternal.UsedIdent]. +// It returns the identifier associated with e. +// See typesinternal.UsedIdent for a fuller description. +// This function should live in typesinternal, but cannot because it would +// create an import cycle. +// +//go:linkname usedIdent golang.org/x/tools/go/types/typeutil.usedIdent +func usedIdent(info *types.Info, e ast.Expr) *ast.Ident { + if info.Types == nil || info.Uses == nil { + panic("one of info.Types or info.Uses is nil; both must be populated") + } + // Look through type instantiation if necessary. + switch d := ast.Unparen(e).(type) { + case *ast.IndexExpr: + if info.Types[d.Index].IsType() { + e = d.X + } + case *ast.IndexListExpr: + e = d.X + } + + switch e := ast.Unparen(e).(type) { + // info.Uses always has the object we want, even for selector expressions. + // We don't need info.Selections. + // See go/types/recording.go:recordSelection. + case *ast.Ident: + return e + case *ast.SelectorExpr: + return e.Sel + } + return nil +} + +// interfaceMethod reports whether its argument is a method of an interface. +// This function should live in typesinternal, but cannot because it would create an import cycle. +// +//go:linkname interfaceMethod golang.org/x/tools/go/types/typeutil.interfaceMethod +func interfaceMethod(f *types.Func) bool { + recv := f.Signature().Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/openshift/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/openshift/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 0000000000..b81ce0c330 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,30 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. 
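+//
+// A minimal usage sketch (editorial): printing pkg and its
+// dependencies, dependencies first:
+//
+//	for _, dep := range typeutil.Dependencies(pkg) {
+//		fmt.Println(dep.Path())
+//	}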
+func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/openshift/vendor/golang.org/x/tools/go/types/typeutil/map.go b/openshift/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 0000000000..b6d542c64e --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,475 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as [Map], +// a hash table that maps [types.Type] to any value. +package typeutil + +import ( + "bytes" + "fmt" + "go/types" + "hash/maphash" + "unsafe" + + "golang.org/x/tools/internal/typeparams" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary values. The concrete types that implement +// the Type interface are pointers. Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. +// +// Read-only map operations ([Map.At], [Map.Len], and so on) may +// safely be called concurrently. +// +// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420 +// and 69559, if the latter proposals for a generic hash-map type and +// a types.Hash function are accepted. +type Map struct { + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value any +} + +// SetHasher has no effect. +// +// It is a relic of an optimization that is no longer profitable. Do +// not use [Hasher], [MakeHasher], or [SetHasher] in new code. +func (m *Map) SetHasher(Hasher) {} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +func (m *Map) At(key types.Type) any { + if m != nil && m.table != nil { + for _, e := range m.table[hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. 
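+//
+// A usage sketch (editorial): keys are compared with types.Identical,
+// so two distinct but identical type values share one entry:
+//
+//	var m typeutil.Map
+//	m.Set(types.NewSlice(types.Typ[types.Int]), "a")
+//	m.Set(types.NewSlice(types.Typ[types.Int]), "b") // overwrites "a"
+//	fmt.Println(m.At(types.NewSlice(types.Typ[types.Int]))) // "b"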
+func (m *Map) Set(key types.Type, value any) (prev any) { + if m.table != nil { + hash := hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + hash := hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. +// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. +func (m *Map) Iterate(f func(key types.Type, value any)) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. +func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ any) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value any) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +func (m *Map) KeysString() string { + return m.toString(false) +} + +// -- Hasher -- + +// hash returns the hash of type t. +// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted. +func hash(t types.Type) uint32 { + return theHasher.Hash(t) +} + +// A Hasher provides a [Hasher.Hash] method to map a type to its hash value. +// Hashers are stateless, and all are equivalent. +type Hasher struct{} + +var theHasher Hasher + +// MakeHasher returns Hasher{}. +// Hashers are stateless; all are equivalent. +func MakeHasher() Hasher { return theHasher } + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + return hasher{inGenericSig: false}.hash(t) +} + +// hasher holds the state of a single Hash traversal: whether we are +// inside the signature of a generic function; this is used to +// optimize [hasher.hashTypeParam]. +type hasher struct{ inGenericSig bool } + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hash computes the hash of t. +func (h hasher) hash(t types.Type) uint32 { + // See Identical for rationale. 
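+	// (Editorial restatement of the contract.) For any t1, t2 with
+	// types.Identical(t1, t2), the cases below must yield equal
+	// hashes, e.g.:
+	//
+	//	t1 := types.NewPointer(types.Typ[types.String])
+	//	t2 := types.NewPointer(types.Typ[types.String])
+	//	// distinct pointers, identical types, therefore:
+	//	// theHasher.Hash(t1) == theHasher.Hash(t2)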
+ switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Alias: + return h.hash(types.Unalias(t)) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + + tparams := t.TypeParams() + if n := tparams.Len(); n > 0 { + h.inGenericSig = true // affects constraints, params, and results + + for i := range n { + tparam := tparams.At(i) + hash += 7 * h.hash(tparam.Constraint()) + } + } + + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Union: + return h.hashUnion(t) + + case *types.Interface: + // Interfaces are identical if they have the same set of methods, with + // identical names and types, and they have the same set of type + // restrictions. See go/types.identical for more details. + var hash uint32 = 9103 + + // Hash methods. + for i, n := 0, t.NumMethods(); i < n; i++ { + // Method order is not significant. + // Ignore m.Pkg(). + m := t.Method(i) + // Use shallow hash on method signature to + // avoid anonymous interface cycles. + hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type()) + } + + // Hash type restrictions. + terms, err := typeparams.InterfaceTermSet(t) + // if err != nil t has invalid type restrictions. + if err == nil { + hash += h.hashTermSet(terms) + } + + return hash + + case *types.Map: + return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem()) + + case *types.Named: + hash := h.hashTypeName(t.Obj()) + targs := t.TypeArgs() + for i := 0; i < targs.Len(); i++ { + targ := targs.At(i) + hash += 2 * h.hash(targ) + } + return hash + + case *types.TypeParam: + return h.hashTypeParam(t) + + case *types.Tuple: + return h.hashTuple(t) + } + + panic(fmt.Sprintf("%T: %v", t, t)) +} + +func (h hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + hash := 9137 + 2*uint32(n) + for i := range n { + hash += 3 * h.hash(tuple.At(i).Type()) + } + return hash +} + +func (h hasher) hashUnion(t *types.Union) uint32 { + // Hash type restrictions. + terms, err := typeparams.UnionTermSet(t) + // if err != nil t has invalid type restrictions. Fall back on a non-zero + // hash. + if err != nil { + return 9151 + } + return h.hashTermSet(terms) +} + +func (h hasher) hashTermSet(terms []*types.Term) uint32 { + hash := 9157 + 2*uint32(len(terms)) + for _, term := range terms { + // term order is not significant. + termHash := h.hash(term.Type()) + if term.Tilde() { + termHash *= 9161 + } + hash += 3 * termHash + } + return hash +} + +// hashTypeParam returns the hash of a type parameter. +func (h hasher) hashTypeParam(t *types.TypeParam) uint32 { + // Within the signature of a generic function, TypeParams are + // identical if they have the same index and constraint, so we + // hash them based on index. + // + // When we are outside a generic function, free TypeParams are + // identical iff they are the same object, so we can use a + // more discriminating hash consistent with object identity. 
+	// This optimization saves [Map] about 4% when hashing all the
+	// types.Info.Types in the forward closure of net/http.
+	if !h.inGenericSig {
+		// Optimization: outside a generic function signature,
+		// use a more discriminating hash consistent with object identity.
+		return h.hashTypeName(t.Obj())
+	}
+	return 9173 + 3*uint32(t.Index())
+}
+
+var theSeed = maphash.MakeSeed()
+
+// hashTypeName hashes the pointer of tname.
+func (hasher) hashTypeName(tname *types.TypeName) uint32 {
+	// Since types.Identical uses == to compare TypeNames,
+	// the Hash function uses maphash.Comparable.
+	// TODO(adonovan): or will, when it becomes available in go1.24.
+	// In the meantime we use the pointer's numeric value.
+	//
+	//	hash := maphash.Comparable(theSeed, tname)
+	//
+	// (Another approach would be to hash the name and package
+	// path, and whether or not it is a package-level typename. It
+	// is rare for a package to define multiple local types with
+	// the same name.)
+	ptr := uintptr(unsafe.Pointer(tname))
+	if unsafe.Sizeof(ptr) == 8 {
+		hash := uint64(ptr)
+		return uint32(hash ^ (hash >> 32))
+	} else {
+		return uint32(ptr)
+	}
+}
+
+// shallowHash computes a hash of t without looking at any of its
+// element Types, to avoid potential anonymous cycles in the types of
+// interface methods.
+//
+// When an unnamed non-empty interface type appears anywhere among the
+// arguments or results of an interface method, there is a potential
+// for endless recursion. Consider:
+//
+//	type X interface { m() []*interface { X } }
+//
+// The problem is that the Methods of the interface in m's result type
+// include m itself; there is no mention of the named type X that
+// might help us break the cycle.
+// (See comment in go/types.identical, case *Interface, for more.)
+func (h hasher) shallowHash(t types.Type) uint32 {
+	// t is the type of an interface method (Signature),
+	// its params or results (Tuples), or their immediate
+	// elements (mostly Slice, Pointer, Basic, Named),
+	// so there's no need to optimize anything else.
+	switch t := t.(type) {
+	case *types.Alias:
+		return h.shallowHash(types.Unalias(t))
+
+	case *types.Signature:
+		var hash uint32 = 604171
+		if t.Variadic() {
+			hash *= 971767
+		}
+		// The Signature/Tuple recursion is always finite
+		// and invariably shallow.
+		return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results())
+
+	case *types.Tuple:
+		n := t.Len()
+		hash := 9137 + 2*uint32(n)
+		for i := range n {
+			hash += 53471161 * h.shallowHash(t.At(i).Type())
+		}
+		return hash
+
+	case *types.Basic:
+		return 45212177 * uint32(t.Kind())
+
+	case *types.Array:
+		return 1524181 + 2*uint32(t.Len())
+
+	case *types.Slice:
+		return 2690201
+
+	case *types.Struct:
+		return 3326489
+
+	case *types.Pointer:
+		return 4393139
+
+	case *types.Union:
+		return 562448657
+
+	case *types.Interface:
+		return 2124679 // no recursion here
+
+	case *types.Map:
+		return 9109
+
+	case *types.Chan:
+		return 9127
+
+	case *types.Named:
+		return h.hashTypeName(t.Obj())
+
+	case *types.TypeParam:
+		return h.hashTypeParam(t)
+	}
+	panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
+}
diff --git a/openshift/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/openshift/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
new file mode 100644
index 0000000000..f7666028fe
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
@@ -0,0 +1,71 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := types.Unalias(T).(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := types.Unalias(T.Elem()).(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) + mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. + msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/openshift/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/openshift/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 0000000000..9dda6a25df --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,53 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import ( + "go/types" +) + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). 
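+//
+// A minimal usage sketch (hypothetical caller code; the variable T and
+// an fmt import are assumed, not part of this package):
+//
+//	var msets MethodSetCache
+//	for _, sel := range IntuitiveMethodSet(T, &msets) {
+//		fmt.Println(sel.Obj().Name()) // print each intuitive method
+//	}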
+func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := types.Unalias(T).(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/openshift/vendor/golang.org/x/tools/internal/aliases/aliases.go b/openshift/vendor/golang.org/x/tools/internal/aliases/aliases.go new file mode 100644 index 0000000000..b9425f5a20 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -0,0 +1,38 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package aliases + +import ( + "go/token" + "go/types" +) + +// Package aliases defines backward compatible shims +// for the types.Alias type representation added in 1.22. +// This defines placeholders for x/tools until 1.26. + +// NewAlias creates a new TypeName in Package pkg that +// is an alias for the type rhs. +// +// The enabled parameter determines whether the resulting [TypeName]'s +// type is an [types.Alias]. Its value must be the result of a call to +// [Enabled], which computes the effective value of +// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled +// function is expensive and should be called once per task (e.g. +// package import), not once per call to NewAlias. +// +// Precondition: enabled || len(tparams)==0. +// If materialized aliases are disabled, there must not be any type parameters. +func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { + if enabled { + tname := types.NewTypeName(pos, pkg, name, nil) + SetTypeParams(types.NewAlias(tname, rhs), tparams) + return tname + } + if len(tparams) > 0 { + panic("cannot create an alias with type parameters when gotypesalias is not enabled") + } + return types.NewTypeName(pos, pkg, name, rhs) +} diff --git a/openshift/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/openshift/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go new file mode 100644 index 0000000000..7716a3331d --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -0,0 +1,80 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package aliases + +import ( + "go/ast" + "go/parser" + "go/token" + "go/types" +) + +// Rhs returns the type on the right-hand side of the alias declaration. +func Rhs(alias *types.Alias) types.Type { + if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { + return alias.Rhs() // go1.23+ + } + + // go1.22's Alias didn't have the Rhs method, + // so Unalias is the best we can do. + return types.Unalias(alias) +} + +// TypeParams returns the type parameter list of the alias. 
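+// On go1.22, whose Alias type lacks the TypeParams method, it
+// returns nil, per the fallback below.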
+func TypeParams(alias *types.Alias) *types.TypeParamList {
+	if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok {
+		return alias.TypeParams() // go1.23+
+	}
+	return nil
+}
+
+// SetTypeParams sets the type parameters of the alias type.
+func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) {
+	if alias, ok := any(alias).(interface {
+		SetTypeParams(tparams []*types.TypeParam)
+	}); ok {
+		alias.SetTypeParams(tparams) // go1.23+
+	} else if len(tparams) > 0 {
+		panic("cannot set type parameters of an Alias type in go1.22")
+	}
+}
+
+// TypeArgs returns the type arguments used to instantiate the Alias type.
+func TypeArgs(alias *types.Alias) *types.TypeList {
+	if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok {
+		return alias.TypeArgs() // go1.23+
+	}
+	return nil // empty (go1.22)
+}
+
+// Origin returns the generic Alias type of which alias is an instance.
+// If alias is not an instance of a generic alias, Origin returns alias.
+func Origin(alias *types.Alias) *types.Alias {
+	if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok {
+		return alias.Origin() // go1.23+
+	}
+	return alias // not an instance of a generic alias (go1.22)
+}
+
+// Enabled reports whether [NewAlias] should create [types.Alias] types.
+//
+// This function is expensive! Call it sparingly.
+func Enabled() bool {
+	// The only reliable way to compute the answer is to invoke go/types.
+	// We don't parse the GODEBUG environment variable, because
+	// (a) it's tricky to do so in a manner that is consistent
+	//     with the godebug package; in particular, a simple
+	//     substring check is not good enough. The value is a
+	//     rightmost-wins list of options. But more importantly:
+	// (b) it is impossible to detect changes to the effective
+	//     setting caused by os.Setenv("GODEBUG"), as happens in
+	//     many tests. Therefore any attempt to cache the result
+	//     is just incorrect.
+	fset := token.NewFileSet()
+	f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution)
+	pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
+	_, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
+	return enabled
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/event/core/event.go b/openshift/vendor/golang.org/x/tools/internal/event/core/event.go
new file mode 100644
index 0000000000..a6cf0e64a4
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/event/core/event.go
@@ -0,0 +1,85 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package core provides support for event based telemetry.
+package core
+
+import (
+	"fmt"
+	"time"
+
+	"golang.org/x/tools/internal/event/label"
+)
+
+// Event holds the information about an event of note that occurred.
+type Event struct {
+	at time.Time
+
+	// As events are often on the stack, storing the first few labels directly
+	// in the event can avoid an allocation at all for the very common cases of
+	// simple events.
+	// The length needs to be large enough to cope with the majority of events
+	// but not so large as to cause undue stack pressure.
+	// A log message with two values will use 3 labels (one for each value and
+	// one for the message itself).
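+	// (For example, Log2 in this package's fast path fills all three
+	// static slots, the message label plus two value labels, and
+	// leaves dynamic nil.)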
+
+	static  [3]label.Label // inline storage for the first few labels
+	dynamic []label.Label  // dynamically sized storage for remaining labels
+}
+
+// eventLabelMap implements label.Map for the labels of an Event.
+type eventLabelMap struct {
+	event Event
+}
+
+func (ev Event) At() time.Time { return ev.at }
+
+func (ev Event) Format(f fmt.State, r rune) {
+	if !ev.at.IsZero() {
+		fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
+	}
+	for index := 0; ev.Valid(index); index++ {
+		if l := ev.Label(index); l.Valid() {
+			fmt.Fprintf(f, "\n\t%v", l)
+		}
+	}
+}
+
+func (ev Event) Valid(index int) bool {
+	return index >= 0 && index < len(ev.static)+len(ev.dynamic)
+}
+
+func (ev Event) Label(index int) label.Label {
+	if index < len(ev.static) {
+		return ev.static[index]
+	}
+	return ev.dynamic[index-len(ev.static)]
+}
+
+func (ev Event) Find(key label.Key) label.Label {
+	for _, l := range ev.static {
+		if l.Key() == key {
+			return l
+		}
+	}
+	for _, l := range ev.dynamic {
+		if l.Key() == key {
+			return l
+		}
+	}
+	return label.Label{}
+}
+
+func MakeEvent(static [3]label.Label, labels []label.Label) Event {
+	return Event{
+		static:  static,
+		dynamic: labels,
+	}
+}
+
+// CloneEvent returns a copy of the event with the time adjusted to at.
+func CloneEvent(ev Event, at time.Time) Event {
+	ev.at = at
+	return ev
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/event/core/export.go b/openshift/vendor/golang.org/x/tools/internal/event/core/export.go
new file mode 100644
index 0000000000..05f3a9a579
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/event/core/export.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+	"context"
+	"sync/atomic"
+	"time"
+	"unsafe"
+
+	"golang.org/x/tools/internal/event/label"
+)
+
+// Exporter is a function that handles events.
+// It may return a modified context and event.
+type Exporter func(context.Context, Event, label.Map) context.Context
+
+var (
+	exporter unsafe.Pointer
+)
+
+// SetExporter sets the global exporter function that handles all events.
+// The exporter is called synchronously from the event call site, so it should
+// return quickly so as not to hold up user code.
+func SetExporter(e Exporter) {
+	p := unsafe.Pointer(&e)
+	if e == nil {
+		// &e is always valid, and so p is always valid, but for the early abort
+		// of ProcessEvent to be efficient it needs to make the nil check on the
+		// pointer without having to dereference it, so we make the nil function
+		// also a nil pointer
+		p = nil
+	}
+	atomic.StorePointer(&exporter, p)
+}
+
+// deliver is called to deliver an event to the supplied exporter.
+// it will fill in the time.
+func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
+	// add the current time to the event
+	ev.at = time.Now()
+	// hand the event off to the current exporter
+	return exporter(ctx, ev, ev)
+}
+
+// Export is called to deliver an event to the global exporter if set.
+func Export(ctx context.Context, ev Event) context.Context {
+	// get the global exporter and abort early if there is not one
+	exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+	if exporterPtr == nil {
+		return ctx
+	}
+	return deliver(ctx, *exporterPtr, ev)
+}
+
+// ExportPair is called to deliver a start event to the supplied exporter.
+// It also returns a function that will deliver the end event to the same
+// exporter.
+// It will fill in the time.
+func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
+	// get the global exporter and abort early if there is not one
+	exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+	if exporterPtr == nil {
+		return ctx, func() {}
+	}
+	ctx = deliver(ctx, *exporterPtr, begin)
+	return ctx, func() { deliver(ctx, *exporterPtr, end) }
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/event/core/fast.go b/openshift/vendor/golang.org/x/tools/internal/event/core/fast.go
new file mode 100644
index 0000000000..06c1d4615e
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/event/core/fast.go
@@ -0,0 +1,77 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+	"context"
+
+	"golang.org/x/tools/internal/event/keys"
+	"golang.org/x/tools/internal/event/label"
+)
+
+// Log1 takes a message and one label and delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
+func Log1(ctx context.Context, message string, t1 label.Label) {
+	Export(ctx, MakeEvent([3]label.Label{
+		keys.Msg.Of(message),
+		t1,
+	}, nil))
+}
+
+// Log2 takes a message and two labels and delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
+func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) {
+	Export(ctx, MakeEvent([3]label.Label{
+		keys.Msg.Of(message),
+		t1,
+		t2,
+	}, nil))
+}
+
+// Metric1 sends a label event to the exporter with the supplied labels.
+func Metric1(ctx context.Context, t1 label.Label) context.Context {
+	return Export(ctx, MakeEvent([3]label.Label{
+		keys.Metric.New(),
+		t1,
+	}, nil))
+}
+
+// Metric2 sends a label event to the exporter with the supplied labels.
+func Metric2(ctx context.Context, t1, t2 label.Label) context.Context {
+	return Export(ctx, MakeEvent([3]label.Label{
+		keys.Metric.New(),
+		t1,
+		t2,
+	}, nil))
+}
+
+// Start1 sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) {
+	return ExportPair(ctx,
+		MakeEvent([3]label.Label{
+			keys.Start.Of(name),
+			t1,
+		}, nil),
+		MakeEvent([3]label.Label{
+			keys.End.New(),
+		}, nil))
+}
+
+// Start2 sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) {
+	return ExportPair(ctx,
+		MakeEvent([3]label.Label{
+			keys.Start.Of(name),
+			t1,
+			t2,
+		}, nil),
+		MakeEvent([3]label.Label{
+			keys.End.New(),
+		}, nil))
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/event/doc.go b/openshift/vendor/golang.org/x/tools/internal/event/doc.go
new file mode 100644
index 0000000000..5dc6e6babe
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/event/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// Package event provides a set of packages that cover the main +// concepts of telemetry in an implementation agnostic way. +package event diff --git a/openshift/vendor/golang.org/x/tools/internal/event/event.go b/openshift/vendor/golang.org/x/tools/internal/event/event.go new file mode 100644 index 0000000000..4d55e577d1 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/event/event.go @@ -0,0 +1,127 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package event + +import ( + "context" + + "golang.org/x/tools/internal/event/core" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, core.Event, label.Map) context.Context + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + core.SetExporter(core.Exporter(e)) +} + +// Log takes a message and a label list and combines them into a single event +// before delivering them to the exporter. +func Log(ctx context.Context, message string, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + }, labels)) +} + +// IsLog returns true if the event was built by the Log function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLog(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg +} + +// Error takes a message and a label list and combines them into a single event +// before delivering them to the exporter. It captures the error in the +// delivered event. +func Error(ctx context.Context, message string, err error, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + keys.Err.Of(err), + }, labels)) +} + +// IsError returns true if the event was built by the Error function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsError(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg && + ev.Label(1).Key() == keys.Err +} + +// Metric sends a label event to the exporter with the supplied labels. +func Metric(ctx context.Context, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Metric.New(), + }, labels)) +} + +// IsMetric returns true if the event was built by the Metric function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsMetric(ev core.Event) bool { + return ev.Label(0).Key() == keys.Metric +} + +// Label sends a label event to the exporter with the supplied labels. +func Label(ctx context.Context, labels ...label.Label) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Label.New(), + }, labels)) +} + +// IsLabel returns true if the event was built by the Label function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLabel(ev core.Event) bool { + return ev.Label(0).Key() == keys.Label +} + +// Start sends a span start event with the supplied label list to the exporter. 
+// It also returns a function that will end the span, which should normally be +// deferred. +func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) { + return core.ExportPair(ctx, + core.MakeEvent([3]label.Label{ + keys.Start.Of(name), + }, labels), + core.MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// IsStart returns true if the event was built by the Start function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsStart(ev core.Event) bool { + return ev.Label(0).Key() == keys.Start +} + +// IsEnd returns true if the event was built by the End function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsEnd(ev core.Event) bool { + return ev.Label(0).Key() == keys.End +} + +// Detach returns a context without an associated span. +// This allows the creation of spans that are not children of the current span. +func Detach(ctx context.Context) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Detach.New(), + }, nil)) +} + +// IsDetach returns true if the event was built by the Detach function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsDetach(ev core.Event) bool { + return ev.Label(0).Key() == keys.Detach +} diff --git a/openshift/vendor/golang.org/x/tools/internal/event/keys/keys.go b/openshift/vendor/golang.org/x/tools/internal/event/keys/keys.go new file mode 100644 index 0000000000..4cfa51b612 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -0,0 +1,564 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "fmt" + "io" + "math" + "strconv" + + "golang.org/x/tools/internal/event/label" +) + +// Value represents a key for untyped values. +type Value struct { + name string + description string +} + +// New creates a new Key for untyped values. +func New(name, description string) *Value { + return &Value{name: name, description: description} +} + +func (k *Value) Name() string { return k.name } +func (k *Value) Description() string { return k.description } + +func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { + fmt.Fprint(w, k.From(l)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Value) Get(lm label.Map) any { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. +func (k *Value) From(t label.Label) any { return t.UnpackValue() } + +// Of creates a new Label with this key and the supplied value. +func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) } + +// Tag represents a key for tagging labels that have no value. +// These are used when the existence of the label is the entire information it +// carries, such as marking events to be of a specific kind, or from a specific +// package. +type Tag struct { + name string + description string +} + +// NewTag creates a new Key for tagging labels. 
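+//
+// For example, standard.go in this package defines:
+//
+//	Detach = NewTag("detach", "a span detach marker")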
+func NewTag(name, description string) *Tag { + return &Tag{name: name, description: description} +} + +func (k *Tag) Name() string { return k.name } +func (k *Tag) Description() string { return k.description } + +func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {} + +// New creates a new Label with this key. +func (k *Tag) New() label.Label { return label.OfValue(k, nil) } + +// Int represents a key +type Int struct { + name string + description string +} + +// NewInt creates a new Key for int values. +func NewInt(name, description string) *Int { + return &Int{name: name, description: description} +} + +func (k *Int) Name() string { return k.name } +func (k *Int) Description() string { return k.description } + +func (k *Int) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int) Get(lm label.Map) int { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int) From(t label.Label) int { return int(t.Unpack64()) } + +// Int8 represents a key +type Int8 struct { + name string + description string +} + +// NewInt8 creates a new Key for int8 values. +func NewInt8(name, description string) *Int8 { + return &Int8{name: name, description: description} +} + +func (k *Int8) Name() string { return k.name } +func (k *Int8) Description() string { return k.description } + +func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int8) Get(lm label.Map) int8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) } + +// Int16 represents a key +type Int16 struct { + name string + description string +} + +// NewInt16 creates a new Key for int16 values. +func NewInt16(name, description string) *Int16 { + return &Int16{name: name, description: description} +} + +func (k *Int16) Name() string { return k.name } +func (k *Int16) Description() string { return k.description } + +func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int16) Get(lm label.Map) int16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) } + +// Int32 represents a key +type Int32 struct { + name string + description string +} + +// NewInt32 creates a new Key for int32 values. 
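+//
+// A hypothetical key (the name and description are illustrative only):
+//
+//	var hits = NewInt32("hits", "number of cache hits")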
+func NewInt32(name, description string) *Int32 { + return &Int32{name: name, description: description} +} + +func (k *Int32) Name() string { return k.name } +func (k *Int32) Description() string { return k.description } + +func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int32) Get(lm label.Map) int32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) } + +// Int64 represents a key +type Int64 struct { + name string + description string +} + +// NewInt64 creates a new Key for int64 values. +func NewInt64(name, description string) *Int64 { + return &Int64{name: name, description: description} +} + +func (k *Int64) Name() string { return k.name } +func (k *Int64) Description() string { return k.description } + +func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int64) Get(lm label.Map) int64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) } + +// UInt represents a key +type UInt struct { + name string + description string +} + +// NewUInt creates a new Key for uint values. +func NewUInt(name, description string) *UInt { + return &UInt{name: name, description: description} +} + +func (k *UInt) Name() string { return k.name } +func (k *UInt) Description() string { return k.description } + +func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt) Get(lm label.Map) uint { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) } + +// UInt8 represents a key +type UInt8 struct { + name string + description string +} + +// NewUInt8 creates a new Key for uint8 values. +func NewUInt8(name, description string) *UInt8 { + return &UInt8{name: name, description: description} +} + +func (k *UInt8) Name() string { return k.name } +func (k *UInt8) Description() string { return k.description } + +func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt8) Get(lm label.Map) uint8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. 
+func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) } + +// UInt16 represents a key +type UInt16 struct { + name string + description string +} + +// NewUInt16 creates a new Key for uint16 values. +func NewUInt16(name, description string) *UInt16 { + return &UInt16{name: name, description: description} +} + +func (k *UInt16) Name() string { return k.name } +func (k *UInt16) Description() string { return k.description } + +func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt16) Get(lm label.Map) uint16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) } + +// UInt32 represents a key +type UInt32 struct { + name string + description string +} + +// NewUInt32 creates a new Key for uint32 values. +func NewUInt32(name, description string) *UInt32 { + return &UInt32{name: name, description: description} +} + +func (k *UInt32) Name() string { return k.name } +func (k *UInt32) Description() string { return k.description } + +func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt32) Get(lm label.Map) uint32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) } + +// UInt64 represents a key +type UInt64 struct { + name string + description string +} + +// NewUInt64 creates a new Key for uint64 values. +func NewUInt64(name, description string) *UInt64 { + return &UInt64{name: name, description: description} +} + +func (k *UInt64) Name() string { return k.name } +func (k *UInt64) Description() string { return k.description } + +func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt64) Get(lm label.Map) uint64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() } + +// Float32 represents a key +type Float32 struct { + name string + description string +} + +// NewFloat32 creates a new Key for float32 values. +func NewFloat32(name, description string) *Float32 { + return &Float32{name: name, description: description} +} + +func (k *Float32) Name() string { return k.name } +func (k *Float32) Description() string { return k.description } + +func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32)) +} + +// Of creates a new Label with this key and the supplied value. 
+func (k *Float32) Of(v float32) label.Label {
+	return label.Of64(k, uint64(math.Float32bits(v)))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float32) Get(lm label.Map) float32 {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float32) From(t label.Label) float32 {
+	return math.Float32frombits(uint32(t.Unpack64()))
+}
+
+// Float64 represents a key
+type Float64 struct {
+	name        string
+	description string
+}
+
+// NewFloat64 creates a new Key for float64 values.
+func NewFloat64(name, description string) *Float64 {
+	return &Float64{name: name, description: description}
+}
+
+func (k *Float64) Name() string        { return k.name }
+func (k *Float64) Description() string { return k.description }
+
+func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float64) Of(v float64) label.Label {
+	return label.Of64(k, math.Float64bits(v))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float64) Get(lm label.Map) float64 {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float64) From(t label.Label) float64 {
+	return math.Float64frombits(t.Unpack64())
+}
+
+// String represents a key
+type String struct {
+	name        string
+	description string
+}
+
+// NewString creates a new Key for string values.
+func NewString(name, description string) *String {
+	return &String{name: name, description: description}
+}
+
+func (k *String) Name() string        { return k.name }
+func (k *String) Description() string { return k.description }
+
+func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendQuote(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *String) Get(lm label.Map) string {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return ""
+}
+
+// From can be used to get a value from a Label.
+func (k *String) From(t label.Label) string { return t.UnpackString() }
+
+// Boolean represents a key
+type Boolean struct {
+	name        string
+	description string
+}
+
+// NewBoolean creates a new Key for bool values.
+func NewBoolean(name, description string) *Boolean {
+	return &Boolean{name: name, description: description}
+}
+
+func (k *Boolean) Name() string        { return k.name }
+func (k *Boolean) Description() string { return k.description }
+
+func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendBool(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Boolean) Of(v bool) label.Label {
+	if v {
+		return label.Of64(k, 1)
+	}
+	return label.Of64(k, 0)
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Boolean) Get(lm label.Map) bool {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return false
+}
+
+// From can be used to get a value from a Label.
+func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
+
+// Error represents a key
+type Error struct {
+	name        string
+	description string
+}
+
+// NewError creates a new Key for error values.
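+//
+// For example, standard.go in this package defines:
+//
+//	Err = NewError("error", "an error that occurred")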
+func NewError(name, description string) *Error {
+	return &Error{name: name, description: description}
+}
+
+func (k *Error) Name() string        { return k.name }
+func (k *Error) Description() string { return k.description }
+
+func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
+	io.WriteString(w, k.From(l).Error())
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Error) Get(lm label.Map) error {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Error) From(t label.Label) error {
+	err, _ := t.UnpackValue().(error)
+	return err
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/event/keys/standard.go b/openshift/vendor/golang.org/x/tools/internal/event/keys/standard.go
new file mode 100644
index 0000000000..7e95866592
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/event/keys/standard.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+var (
+	// Msg is a key used to add message strings to label lists.
+	Msg = NewString("message", "a readable message")
+	// Label is a key used to indicate an event adds labels to the context.
+	Label = NewTag("label", "a label context marker")
+	// Start is used for things like traces that have a name.
+	Start = NewString("start", "span start")
+	// End is a key used to mark the end of a span.
+	End = NewTag("end", "a span end marker")
+	// Detach is a key used to indicate an event detaches from the enclosing span.
+	Detach = NewTag("detach", "a span detach marker")
+	// Err is a key used to add error values to label lists.
+	Err = NewError("error", "an error that occurred")
+	// Metric is a key used to indicate an event records metrics.
+	Metric = NewTag("metric", "a metric event marker")
+)
diff --git a/openshift/vendor/golang.org/x/tools/internal/event/keys/util.go b/openshift/vendor/golang.org/x/tools/internal/event/keys/util.go
new file mode 100644
index 0000000000..c0e8e731c9
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/event/keys/util.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+import (
+	"sort"
+	"strings"
+)
+
+// Join returns a canonical join of the keys in S:
+// a sorted comma-separated string list.
+func Join[S ~[]T, T ~string](s S) string {
+	strs := make([]string, 0, len(s))
+	for _, v := range s {
+		strs = append(strs, string(v))
+	}
+	sort.Strings(strs)
+	return strings.Join(strs, ",")
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/event/label/label.go b/openshift/vendor/golang.org/x/tools/internal/event/label/label.go
new file mode 100644
index 0000000000..92a3910573
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/event/label/label.go
@@ -0,0 +1,214 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package label
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"slices"
+	"unsafe"
+)
+
+// Key is used as the identity of a Label.
+// Keys are intended to be compared by pointer only, the name should be unique +// for communicating with external systems, but it is not required or enforced. +type Key interface { + // Name returns the key name. + Name() string + // Description returns a string that can be used to describe the value. + Description() string + + // Format is used in formatting to append the value of the label to the + // supplied buffer. + // The formatter may use the supplied buf as a scratch area to avoid + // allocations. + Format(w io.Writer, buf []byte, l Label) +} + +// Label holds a key and value pair. +// It is normally used when passing around lists of labels. +type Label struct { + key Key + packed uint64 + untyped any +} + +// Map is the interface to a collection of Labels indexed by key. +type Map interface { + // Find returns the label that matches the supplied key. + Find(key Key) Label +} + +// List is the interface to something that provides an iterable +// list of labels. +// Iteration should start from 0 and continue until Valid returns false. +type List interface { + // Valid returns true if the index is within range for the list. + // It does not imply the label at that index will itself be valid. + Valid(index int) bool + // Label returns the label at the given index. + Label(index int) Label +} + +// list implements LabelList for a list of Labels. +type list struct { + labels []Label +} + +// filter wraps a LabelList filtering out specific labels. +type filter struct { + keys []Key + underlying List +} + +// listMap implements LabelMap for a simple list of labels. +type listMap struct { + labels []Label +} + +// mapChain implements LabelMap for a list of underlying LabelMap. +type mapChain struct { + maps []Map +} + +// OfValue creates a new label from the key and value. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} } + +// UnpackValue assumes the label was built using LabelOfValue and returns the value +// that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackValue() any { return t.untyped } + +// Of64 creates a new label from a key and a uint64. This is often +// used for non uint64 values that can be packed into a uint64. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} } + +// Unpack64 assumes the label was built using LabelOf64 and returns the value that +// was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) Unpack64() uint64 { return t.packed } + +type stringptr unsafe.Pointer + +// OfString creates a new label from a key and a string. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfString(k Key, v string) Label { + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + return Label{ + key: k, + packed: uint64(hdr.Len), + untyped: stringptr(hdr.Data), + } +} + +// UnpackString assumes the label was built using LabelOfString and returns the +// value that was passed to that constructor. 
+// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackString() string { + var v string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + hdr.Data = uintptr(t.untyped.(stringptr)) + hdr.Len = int(t.packed) + return v +} + +// Valid returns true if the Label is a valid one (it has a key). +func (t Label) Valid() bool { return t.key != nil } + +// Key returns the key of this Label. +func (t Label) Key() Key { return t.key } + +// Format is used for debug printing of labels. +func (t Label) Format(f fmt.State, r rune) { + if !t.Valid() { + io.WriteString(f, `nil`) + return + } + io.WriteString(f, t.Key().Name()) + io.WriteString(f, "=") + var buf [128]byte + t.Key().Format(f, buf[:0], t) +} + +func (l *list) Valid(index int) bool { + return index >= 0 && index < len(l.labels) +} + +func (l *list) Label(index int) Label { + return l.labels[index] +} + +func (f *filter) Valid(index int) bool { + return f.underlying.Valid(index) +} + +func (f *filter) Label(index int) Label { + l := f.underlying.Label(index) + if slices.Contains(f.keys, l.Key()) { + return Label{} + } + return l +} + +func (lm listMap) Find(key Key) Label { + for _, l := range lm.labels { + if l.Key() == key { + return l + } + } + return Label{} +} + +func (c mapChain) Find(key Key) Label { + for _, src := range c.maps { + l := src.Find(key) + if l.Valid() { + return l + } + } + return Label{} +} + +var emptyList = &list{} + +func NewList(labels ...Label) List { + if len(labels) == 0 { + return emptyList + } + return &list{labels: labels} +} + +func Filter(l List, keys ...Key) List { + if len(keys) == 0 { + return l + } + return &filter{keys: keys, underlying: l} +} + +func NewMap(labels ...Label) Map { + return listMap{labels: labels} +} + +func MergeMaps(srcs ...Map) Map { + var nonNil []Map + for _, src := range srcs { + if src != nil { + nonNil = append(nonNil, src) + } + } + if len(nonNil) == 1 { + return nonNil[0] + } + return mapChain{maps: nonNil} +} diff --git a/openshift/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/openshift/vendor/golang.org/x/tools/internal/gcimporter/bimport.go new file mode 100644 index 0000000000..734c46198d --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" + "sync" +) + +func errorf(format string, args ...any) { + panic(fmt.Sprintf(format, args...)) +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*fileInfo +} + +type fileInfo struct { + file *token.File + lastline int +} + +const maxlines = 64 * 1024 + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we reserve maxlines + // positions per file. We delay calling token.File.SetLines until all + // positions have been calculated (by way of fakeFileSet.setLines), so that + // we can avoid setting unnecessary lines. See also golang/go#46586. 
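+	//
+	// Worked example: if f.file.Base() is 100, then line 3 of that file
+	// maps to token.Pos(100 + 3 - 1) = token.Pos(102), per the return
+	// at the end of this function.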
+	f := s.files[file]
+	if f == nil {
+		f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)}
+		s.files[file] = f
+	}
+	if line > maxlines {
+		line = 1
+	}
+	if line > f.lastline {
+		f.lastline = line
+	}
+
+	// Return a fake position assuming that f.file consists only of newlines.
+	return token.Pos(f.file.Base() + line - 1)
+}
+
+func (s *fakeFileSet) setLines() {
+	fakeLinesOnce.Do(func() {
+		fakeLines = make([]int, maxlines)
+		for i := range fakeLines {
+			fakeLines[i] = i
+		}
+	})
+	for _, f := range s.files {
+		f.file.SetLines(fakeLines[:f.lastline])
+	}
+}
+
+var (
+	fakeLines     []int
+	fakeLinesOnce sync.Once
+)
+
+func chanDir(d int) types.ChanDir {
+	// tag values must match the constants in cmd/compile/internal/gc/go.go
+	switch d {
+	case 1 /* Crecv */ :
+		return types.RecvOnly
+	case 2 /* Csend */ :
+		return types.SendOnly
+	case 3 /* Cboth */ :
+		return types.SendRecv
+	default:
+		errorf("unexpected channel dir %d", d)
+		return 0
+	}
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/openshift/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
new file mode 100644
index 0000000000..5662a311da
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
@@ -0,0 +1,421 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go.
+// This file also additionally implements FindExportData for gcexportdata.NewReader.
+
+package gcimporter
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"go/build"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"sync"
+)
+
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying cmd/compile created archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function.
+// This returns the length of the export data in bytes.
+//
+// This function is needed by [gcexportdata.Read], which must
+// accept inputs produced by the last two releases of cmd/compile,
+// plus tip.
+func FindExportData(r *bufio.Reader) (size int64, err error) {
+	arsize, err := FindPackageDefinition(r)
+	if err != nil {
+		return
+	}
+	size = int64(arsize)
+
+	objapi, headers, err := ReadObjectHeaders(r)
+	if err != nil {
+		return
+	}
+	size -= int64(len(objapi))
+	for _, h := range headers {
+		size -= int64(len(h))
+	}
+
+	// Check for the binary export data section header "$$B\n".
+	// TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading
+	line, err := r.ReadSlice('\n')
+	if err != nil {
+		return
+	}
+	hdr := string(line)
+	if hdr != "$$B\n" {
+		err = fmt.Errorf("unknown export data header: %q", hdr)
+		return
+	}
+	size -= int64(len(hdr))
+
+	// For files with a binary export data header "$$B\n",
+	// these are always terminated by an end-of-section marker "\n$$\n".
+	// So the last bytes must always be this constant.
+	//
+	// The end-of-section marker is not a part of the export data itself.
+	// Do not include these in size.
+	//
+	// It would be nice to have a sanity check that the final bytes after
+	// the export data are indeed the end-of-section marker. The split
+	// of gcexportdata.NewReader and gcexportdata.Read makes checking this
+	// ugly so gcimporter gives up enforcing this. The compiler and go/types
+	// importer do enforce this, which seems good enough.
+	const endofsection = "\n$$\n"
+	size -= int64(len(endofsection))
+
+	if size < 0 {
+		err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
+		return
+	}
+
+	return
+}
+
+// ReadUnified reads the contents of the unified export data from a reader r
+// that contains the contents of a GC-created archive file.
+//
+// On success, the reader will be positioned after the end-of-section marker "\n$$\n".
+//
+// Supported GC-created archive files have 4 layers of nesting:
+//   - An archive file containing a package definition file.
+//   - The package definition file contains headers followed by a data section.
+//     Headers are lines (≤ 4kb) that do not start with "$$".
+//   - The data section starts with "$$B\n" followed by export data followed
+//     by an end of section marker "\n$$\n". (The section start "$$\n" is no
+//     longer supported.)
+//   - The export data starts with a format byte ('u') followed by the <data> in
+//     the given format. (See ReadExportDataHeader for older formats.)
+//
+// Putting this together, the bytes in a GC-created archive files are expected
+// to look like the following.
+// See cmd/internal/archive for more details on ar file headers.
+//
+//	| !<arch>\n             | ar file signature
+//	| __.PKGDEF...size...\n | ar header for __.PKGDEF including size.
+//	| go object <...>\n     | objabi header
+//	| <...>\n               | other headers such as build id
+//	| $$B\n                 | binary format marker
+//	| u<data>\n             | unified export <data>
+//	| $$\n                  | end-of-section marker
+//	| [optional padding]    | padding byte (0x0A) if size is odd
+//	| [ar file header]      | other ar files
+//	| [ar file data]        |
+func ReadUnified(r *bufio.Reader) (data []byte, err error) {
+	// We historically guaranteed headers at the default buffer size (4096) work.
+	// This ensures we can use ReadSlice throughout.
+	const minBufferSize = 4096
+	r = bufio.NewReaderSize(r, minBufferSize)
+
+	size, err := FindPackageDefinition(r)
+	if err != nil {
+		return
+	}
+	n := size
+
+	objapi, headers, err := ReadObjectHeaders(r)
+	if err != nil {
+		return
+	}
+	n -= len(objapi)
+	for _, h := range headers {
+		n -= len(h)
+	}
+
+	hdrlen, err := ReadExportDataHeader(r)
+	if err != nil {
+		return
+	}
+	n -= hdrlen
+
+	// size also includes the end of section marker. Remove that many bytes from the end.
+	const marker = "\n$$\n"
+	n -= len(marker)
+
+	if n < 0 {
+		err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n)
+		return
+	}
+
+	// Read n bytes from buf.
+	data = make([]byte, n)
+	_, err = io.ReadFull(r, data)
+	if err != nil {
+		return
+	}
+
+	// Check for marker at the end.
+	var suffix [len(marker)]byte
+	_, err = io.ReadFull(r, suffix[:])
+	if err != nil {
+		return
+	}
+	if s := string(suffix[:]); s != marker {
+		err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker)
+		return
+	}
+
+	return
+}
+
+// FindPackageDefinition positions the reader r at the beginning of a package
+// definition file ("__.PKGDEF") within a GC-created archive by reading
+// from it, and returns the size of the package definition file in the archive.
+//
+// The reader must be positioned at the start of the archive file before calling
+// this function, and "__.PKGDEF" is assumed to be the first file in the archive.
+//
+// See cmd/internal/archive for details on the archive format.
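+//
+// A minimal sketch of typical use, mirroring FindExportData above
+// (the archive io.Reader is assumed):
+//
+//	r := bufio.NewReader(archive)
+//	arsize, err := FindPackageDefinition(r)
+//	// On success, r is positioned at the objabi header of __.PKGDEF,
+//	// and arsize is the size of its archive entry.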
+func FindPackageDefinition(r *bufio.Reader) (size int, err error) {
+	// Uses ReadSlice to limit risk of malformed inputs.
+
+	// Read first line to make sure this is an object file.
+	line, err := r.ReadSlice('\n')
+	if err != nil {
+		err = fmt.Errorf("can't find export data (%v)", err)
+		return
+	}
+
+	// Is the first line an archive file signature?
+	if string(line) != "!<arch>\n" {
+		err = fmt.Errorf("not the start of an archive file (%q)", line)
+		return
+	}
+
+	// package export block should be first
+	size = readArchiveHeader(r, "__.PKGDEF")
+	if size <= 0 {
+		err = fmt.Errorf("not a package file")
+		return
+	}
+
+	return
+}
+
+// ReadObjectHeaders reads object headers from the reader. Object headers are
+// lines that do not start with an end-of-section marker "$$". The first header
+// is the objabi header. On success, the reader will be positioned at the beginning
+// of the end-of-section marker.
+//
+// It returns an error if any header does not fit in r.Size() bytes.
+func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) {
+	// line is a temporary buffer for headers.
+	// Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs.
+	var line []byte
+
+	// objapi header should be the first line
+	if line, err = r.ReadSlice('\n'); err != nil {
+		err = fmt.Errorf("can't find export data (%v)", err)
+		return
+	}
+	objapi = string(line)
+
+	// objapi header begins with "go object ".
+	if !strings.HasPrefix(objapi, "go object ") {
+		err = fmt.Errorf("not a go object file: %s", objapi)
+		return
+	}
+
+	// process remaining object header lines
+	for {
+		// check for an end of section marker "$$"
+		line, err = r.Peek(2)
+		if err != nil {
+			return
+		}
+		if string(line) == "$$" {
+			return // stop
+		}
+
+		// read next header
+		line, err = r.ReadSlice('\n')
+		if err != nil {
+			return
+		}
+		headers = append(headers, string(line))
+	}
+}
+
+// ReadExportDataHeader reads the export data header and format from r.
+// It returns the number of bytes read, or an error if the format is no longer
+// supported or it failed to read.
+//
+// The only currently supported format is binary export data in the
+// unified export format.
+func ReadExportDataHeader(r *bufio.Reader) (n int, err error) {
+	// Read export data header.
+	line, err := r.ReadSlice('\n')
+	if err != nil {
+		return
+	}
+
+	hdr := string(line)
+	switch hdr {
+	case "$$\n":
+		err = fmt.Errorf("old textual export format no longer supported (recompile package)")
+		return
+
+	case "$$B\n":
+		var format byte
+		format, err = r.ReadByte()
+		if err != nil {
+			return
+		}
+		// The unified export format starts with a 'u'.
+		switch format {
+		case 'u':
+		default:
+			// Older no longer supported export formats include:
+			// indexed export format which started with an 'i'; and
+			// the older binary export format which started with a 'c',
+			// 'd', or 'v' (from "version").
+			err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format)
+			return
+		}
+
+	default:
+		err = fmt.Errorf("unknown export data header: %q", hdr)
+		return
+	}
+
+	n = len(hdr) + 1 // + 1 is for 'u'
+	return
+}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+//
+// FindPkg is only used in tests within x/tools.
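+//
+// A hypothetical call (results depend on the local build environment):
+//
+//	filename, id, err := FindPkg("fmt", ".")
+//	// filename is the export data file for "fmt"; id is "fmt".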
+func FindPkg(path, srcDir string) (filename, id string, err error) { + // TODO(taking): Move internal/exportdata.FindPkg into its own file, + // and then this copy into a _test package. + if path == "" { + return "", "", errors.New("path is empty") + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. + if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + var bp *build.Package + bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + if bp.Goroot && bp.Dir != "" { + filename, err = lookupGorootExport(bp.Dir) + if err == nil { + _, err = os.Stat(filename) + } + if err == nil { + return filename, bp.ImportPath, nil + } + } + goto notfound + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + } + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + f, statErr := os.Stat(filename) + if statErr == nil && !f.IsDir() { + return filename, id, nil + } + if err == nil { + err = statErr + } + } + +notfound: + if err == nil { + return "", path, fmt.Errorf("can't find import: %q", path) + } + return "", path, fmt.Errorf("can't find import: %q: %w", path, err) +} + +var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension + +var exportMap sync.Map // package dir → func() (string, error) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +// +// lookupGorootExport is only used in tests within x/tools. 
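+//
+// It is roughly equivalent to running, from GOROOT:
+//
+//	go list -export -f {{.Export}} <pkgDir>
+//
+// and memoizing the resulting export file path per directory.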
+func lookupGorootExport(pkgDir string) (string, error) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + err error + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) { + listOnce.Do(func() { + cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT) + var output []byte + output, err = cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + err = errors.New(string(ee.Stderr)) + } + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + err = fmt.Errorf("go list reported %d exports; expected 1", len(exports)) + return + } + + exportPath = exports[0] + }) + + return exportPath, err + }) + } + + return f.(func() (string, error))() +} diff --git a/openshift/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/openshift/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go new file mode 100644 index 0000000000..3dbd21d1b9 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -0,0 +1,108 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package. +package gcimporter // import "golang.org/x/tools/internal/gcimporter" + +import ( + "bufio" + "fmt" + "go/token" + "go/types" + "io" + "os" +) + +const ( + // Enable debug during development: it adds some additional checks, and + // prevents errors from being recovered. + debug = false + + // If trace is set, debugging output is printed to std out. + trace = false +) + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +// +// Import is only used in tests. +func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. 
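+		// (A package is complete once all its exported declarations have
+		// been loaded; see (*types.Package).Complete.)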
+ if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + var filename string + filename, id, err = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, err + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + buf := bufio.NewReader(rc) + data, err := ReadUnified(buf) + if err != nil { + err = fmt.Errorf("import %q: %v", path, err) + return + } + + // unified: emitted by cmd/compile since go1.20. + _, pkg, err = UImportData(fset, packages, data, id) + + return +} diff --git a/openshift/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/openshift/vendor/golang.org/x/tools/internal/gcimporter/iexport.go new file mode 100644 index 0000000000..780873e3ae --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -0,0 +1,1596 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package export. +// +// The indexed export data format is an evolution of the previous +// binary export data format. Its chief contribution is introducing an +// index table, which allows efficient random access of individual +// declarations and inline function bodies. In turn, this allows +// avoiding unnecessary work for compilation units that import large +// packages. +// +// +// The top-level data format is structured as: +// +// Header struct { +// Tag byte // 'i' +// Version uvarint +// StringSize uvarint +// DataSize uvarint +// } +// +// Strings [StringSize]byte +// Data [DataSize]byte +// +// MainIndex []struct{ +// PkgPath stringOff +// PkgName stringOff +// PkgHeight uvarint +// +// Decls []struct{ +// Name stringOff +// Offset declOff +// } +// } +// +// Fingerprint [8]byte +// +// uvarint means a uint64 written out using uvarint encoding. +// +// []T means a uvarint followed by that many T objects. In other +// words: +// +// Len uvarint +// Elems [Len]T +// +// stringOff means a uvarint that indicates an offset within the +// Strings section. At that offset is another uvarint, followed by +// that many bytes, which form the string value. +// +// declOff means a uvarint that indicates an offset within the Data +// section where the associated declaration can be found. 
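+//
+// For example, if the Strings section begins with the bytes
+// 03 66 6f 6f (a uvarint length 3 followed by "foo"), then a stringOff
+// of 0 anywhere else in the file denotes the string "foo".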
+// +// +// There are five kinds of declarations, distinguished by their first +// byte: +// +// type Var struct { +// Tag byte // 'V' +// Pos Pos +// Type typeOff +// } +// +// type Func struct { +// Tag byte // 'F' or 'G' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'G' +// Signature Signature +// } +// +// type Const struct { +// Tag byte // 'C' +// Pos Pos +// Value Value +// } +// +// type Type struct { +// Tag byte // 'T' or 'U' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'U' +// Underlying typeOff +// +// Methods []struct{ // omitted if Underlying is an interface type +// Pos Pos +// Name stringOff +// Recv Param +// Signature Signature +// } +// } +// +// type Alias struct { +// Tag byte // 'A' or 'B' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'B' +// Type typeOff +// } +// +// // "Automatic" declaration of each typeparam +// type TypeParam struct { +// Tag byte // 'P' +// Pos Pos +// Implicit bool +// Constraint typeOff +// } +// +// typeOff means a uvarint that either indicates a predeclared type, +// or an offset into the Data section. If the uvarint is less than +// predeclReserved, then it indicates the index into the predeclared +// types list (see predeclared in bexport.go for order). Otherwise, +// subtracting predeclReserved yields the offset of a type descriptor. +// +// Value means a type, kind, and type-specific value. See +// (*exportWriter).value for details. +// +// +// There are twelve kinds of type descriptors, distinguished by an itag: +// +// type DefinedType struct { +// Tag itag // definedType +// Name stringOff +// PkgPath stringOff +// } +// +// type PointerType struct { +// Tag itag // pointerType +// Elem typeOff +// } +// +// type SliceType struct { +// Tag itag // sliceType +// Elem typeOff +// } +// +// type ArrayType struct { +// Tag itag // arrayType +// Len uint64 +// Elem typeOff +// } +// +// type ChanType struct { +// Tag itag // chanType +// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv +// Elem typeOff +// } +// +// type MapType struct { +// Tag itag // mapType +// Key typeOff +// Elem typeOff +// } +// +// type FuncType struct { +// Tag itag // signatureType +// PkgPath stringOff +// Signature Signature +// } +// +// type StructType struct { +// Tag itag // structType +// PkgPath stringOff +// Fields []struct { +// Pos Pos +// Name stringOff +// Type typeOff +// Embedded bool +// Note stringOff +// } +// } +// +// type InterfaceType struct { +// Tag itag // interfaceType +// PkgPath stringOff +// Embeddeds []struct { +// Pos Pos +// Type typeOff +// } +// Methods []struct { +// Pos Pos +// Name stringOff +// Signature Signature +// } +// } +// +// // Reference to a type param declaration +// type TypeParamType struct { +// Tag itag // typeParamType +// Name stringOff +// PkgPath stringOff +// } +// +// // Instantiation of a generic type (like List[T2] or List[int]) +// type InstanceType struct { +// Tag itag // instanceType +// Pos pos +// TypeArgs []typeOff +// BaseType typeOff +// } +// +// type UnionType struct { +// Tag itag // interfaceType +// Terms []struct { +// tilde bool +// Type typeOff +// } +// } +// +// +// +// type Signature struct { +// Params []Param +// Results []Param +// Variadic bool // omitted if Results is empty +// } +// +// type Param struct { +// Pos Pos +// Name stringOff +// Type typOff +// } +// +// +// Pos encodes a file:line:column triple, incorporating a simple delta +// encoding scheme within a data object. See exportWriter.pos for +// details. 
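+//
+// As a concrete reading of the []T convention above, a Params list of
+// length 2 is encoded as the uvarint 2 followed by two Param records.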
+
+package gcimporter
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"go/constant"
+	"go/token"
+	"go/types"
+	"io"
+	"math/big"
+	"reflect"
+	"slices"
+	"sort"
+	"strconv"
+	"strings"
+
+	"golang.org/x/tools/go/types/objectpath"
+	"golang.org/x/tools/internal/aliases"
+)
+
+// IExportShallow encodes "shallow" export data for the specified package.
+//
+// For types, we use "shallow" export data. Historically, the Go
+// compiler always produced a summary of the types for a given package
+// that included types from other packages that it indirectly
+// referenced: "deep" export data. This had the advantage that the
+// compiler (and analogous tools such as gopls) need only load one
+// file per direct import. However, it meant that the files tended to
+// get larger based on the level of the package in the import
+// graph. For example, higher-level packages in the kubernetes module
+// have over 1MB of "deep" export data, even when they have almost no
+// content of their own, merely because they mention a major type that
+// references many others. In pathological cases the export data was
+// 300x larger than the source for a package due to this quadratic
+// growth.
+//
+// "Shallow" export data means that the serialized types describe only
+// a single package. If those types mention types from other packages,
+// the type checker may need to request additional packages beyond
+// just the direct imports. Type information for the entire transitive
+// closure of imports is provided (lazily) by the DAG.
+//
+// No promises are made about the encoding other than that it can be decoded by
+// the same version of IImportShallow. If you plan to save export data in the
+// file system, be sure to include a cryptographic digest of the executable in
+// the key to avoid version skew.
+//
+// If the provided reportf func is non-nil, it is used for reporting
+// bugs (e.g. recovered panics) encountered during export, enabling us
+// to obtain via telemetry the stack that would otherwise be lost by
+// merely returning an error.
+func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) {
+	// In principle this operation can only fail if out.Write fails,
+	// but that's impossible for bytes.Buffer---and as a matter of
+	// fact iexportCommon doesn't even check for I/O errors.
+	// TODO(adonovan): handle I/O errors properly.
+	// TODO(adonovan): use byte slices throughout, avoiding copying.
+	const bundle, shallow = false, true
+	var out bytes.Buffer
+	err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, reportf)
+	return out.Bytes(), err
+}
+
+// IImportShallow decodes "shallow" types.Package data encoded by
+// [IExportShallow] in the same executable. This function cannot import data
+// from cmd/compile or gcexportdata.Write.
+//
+// The importer calls getPackages to obtain package symbols for all
+// packages mentioned in the export data, including the one being
+// decoded.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during import.
+// TODO(rfindley): remove reportf when we are confident enough in the new
+// objectpath encoding.
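+//
+// A minimal same-process round trip might look like this (a sketch only;
+// error handling is elided, and the empty imports map assumes pkg has no
+// imports):
+//
+//	data, _ := IExportShallow(fset, pkg, nil)
+//	imports := make(map[string]*types.Package)
+//	pkg2, _ := IImportShallow(fset, GetPackagesFromMap(imports), data, pkg.Path(), nil)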
+func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) { + const bundle = false + const shallow = true + pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf) + if err != nil { + return nil, err + } + return pkgs[0], nil +} + +// ReportFunc is the type of a function used to report formatted bugs. +type ReportFunc = func(string, ...any) + +// Current bundled export format version. Increase with each format change. +// 0: initial implementation +const bundleVersion = 0 + +// IExportData writes indexed export data for pkg to out. +// +// If no file set is provided, position info will be missing. +// The package path of the top-level package will not be recorded, +// so that calls to IImportData can override with a provided package path. +func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + const bundle, shallow = false, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, nil) +} + +// IExportBundle writes an indexed export bundle for pkgs to out. +func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + const bundle, shallow = true, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs, nil) +} + +func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package, reportf ReportFunc) (err error) { + if !debug { + defer func() { + if e := recover(); e != nil { + // Report the stack via telemetry (see #71067). + if reportf != nil { + reportf("panic in exporter") + } + if ierr, ok := e.(internalError); ok { + // internalError usually means we exported a + // bad go/types data structure: a violation + // of an implicit precondition of Export. + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + } + + p := iexporter{ + fset: fset, + version: version, + shallow: shallow, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + tparamNames: map[types.Object]string{}, + typIndex: map[types.Type]uint64{}, + } + if !bundle { + p.localpkg = pkgs[0] + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + for _, pkg := range pkgs { + scope := pkg.Scope() + for _, name := range scope.Names() { + if token.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + if bundle { + // Ensure pkg and its imports are included in the index. + p.allPkgs[pkg] = true + for _, imp := range pkg.Imports() { + p.allPkgs[imp] = true + } + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Produce index of offset of each file record in files. + var files intWriter + var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i + if p.shallow { + fileOffset = make([]uint64, len(p.fileInfos)) + for i, info := range p.fileInfos { + fileOffset[i] = uint64(files.Len()) + p.encodeFile(&files, info.file, info.needed) + } + } + + // Append indices to data0 section. 
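+	// Note: dataLen is captured below *before* the index is appended, so the
+	// header's Data length covers only the declaration data; the importer
+	// reads the index separately, after seeking past the Data section.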
+	dataLen := uint64(p.data0.Len())
+	w := p.newWriter()
+	w.writeIndex(p.declIndex)
+
+	if bundle {
+		w.uint64(uint64(len(pkgs)))
+		for _, pkg := range pkgs {
+			w.pkg(pkg)
+			imps := pkg.Imports()
+			w.uint64(uint64(len(imps)))
+			for _, imp := range imps {
+				w.pkg(imp)
+			}
+		}
+	}
+	w.flush()
+
+	// Assemble header.
+	var hdr intWriter
+	if bundle {
+		hdr.uint64(bundleVersion)
+	}
+	hdr.uint64(uint64(p.version))
+	hdr.uint64(uint64(p.strings.Len()))
+	if p.shallow {
+		hdr.uint64(uint64(files.Len()))
+		hdr.uint64(uint64(len(fileOffset)))
+		for _, offset := range fileOffset {
+			hdr.uint64(offset)
+		}
+	}
+	hdr.uint64(dataLen)
+
+	// Flush output.
+	io.Copy(out, &hdr)
+	io.Copy(out, &p.strings)
+	if p.shallow {
+		io.Copy(out, &files)
+	}
+	io.Copy(out, &p.data0)
+
+	return nil
+}
+
+// encodeFile writes to w a representation of the file sufficient to
+// faithfully restore position information about all needed offsets.
+// Mutates the needed array.
+func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) {
+	_ = needed[0] // precondition: needed is non-empty
+
+	w.uint64(p.stringOff(file.Name()))
+
+	size := uint64(file.Size())
+	w.uint64(size)
+
+	// Sort the set of needed offsets. Duplicates are harmless.
+	slices.Sort(needed)
+
+	lines := file.Lines() // byte offset of each line start
+	w.uint64(uint64(len(lines)))
+
+	// Rather than record the entire array of line start offsets,
+	// we save only a sparse list of (index, offset) pairs for
+	// the start of each line that contains a needed position.
+	var sparse [][2]int // (index, offset) pairs
+outer:
+	for i, lineStart := range lines {
+		lineEnd := size
+		if i < len(lines)-1 {
+			lineEnd = uint64(lines[i+1])
+		}
+		// Does this line contain a needed offset?
+		if needed[0] < lineEnd {
+			sparse = append(sparse, [2]int{i, lineStart})
+			for needed[0] < lineEnd {
+				needed = needed[1:]
+				if len(needed) == 0 {
+					break outer
+				}
+			}
+		}
+	}
+
+	// Delta-encode the (index, offset) pairs.
+	w.uint64(uint64(len(sparse)))
+	var prev [2]int
+	for _, pair := range sparse {
+		w.uint64(uint64(pair[0] - prev[0]))
+		w.uint64(uint64(pair[1] - prev[1]))
+		prev = pair
+	}
+}
+
+// writeIndex writes out an object index. The main index is also read by
+// non-compiler tools and includes a complete package description
+// (i.e., name and height).
+func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
+	type pkgObj struct {
+		obj  types.Object
+		name string // qualified name; differs from obj.Name for type params
+	}
+	// Build a map from packages to objects from that package.
+	pkgObjs := map[*types.Package][]pkgObj{}
+
+	// For the main index, make sure to include every package that
+	// we reference, even if we're not exporting (or reexporting)
+	// any symbols from it.
+ if w.p.localpkg != nil { + pkgObjs[w.p.localpkg] = nil + } + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + name := w.p.exportName(obj) + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name}) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].name < objs[j].name + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(w.exportPath(pkg)) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.name) + w.uint64(index[obj.obj]) + } + } +} + +// exportName returns the 'exported' name of an object. It differs from +// obj.Name() only for type parameters (see tparamExportName for details). +func (p *iexporter) exportName(obj types.Object) (res string) { + if name := p.tparamNames[obj]; name != "" { + return name + } + return obj.Name() +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + version int + + shallow bool // don't put types from other packages in the index + objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated + localpkg *types.Package // (nil in bundle mode) + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + // In shallow mode, object positions are encoded as (file, offset). + // Each file is recorded as a line-number table. + // Only the lines of needed positions are saved faithfully. + fileInfo map[*token.File]uint64 // value is index in fileInfos + fileInfos []*filePositions + + data0 intWriter + declIndex map[types.Object]uint64 + tparamNames map[types.Object]string // typeparam->exported name + typIndex map[types.Type]uint64 + + indent int // for tracing support +} + +type filePositions struct { + file *token.File + needed []uint64 // unordered list of needed file offsets +} + +func (p *iexporter) trace(format string, args ...any) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +// objectpathEncoder returns the lazily allocated objectpath.Encoder to use +// when encoding objects in other packages during shallow export. +// +// Using a shared Encoder amortizes some of cost of objectpath search. +func (p *iexporter) objectpathEncoder() *objectpath.Encoder { + if p.objEncoder == nil { + p.objEncoder = new(objectpath.Encoder) + } + return p.objEncoder +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. +func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it. 
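+// Each returned offset is also recorded in the file's filePositions.needed
+// list, which encodeFile later sorts and sparsely encodes.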
+func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
+	index, ok := p.fileInfo[file]
+	if !ok {
+		index = uint64(len(p.fileInfo))
+		p.fileInfos = append(p.fileInfos, &filePositions{file: file})
+		if p.fileInfo == nil {
+			p.fileInfo = make(map[*token.File]uint64)
+		}
+		p.fileInfo[file] = index
+	}
+	// Record each needed offset.
+	info := p.fileInfos[index]
+	offset := uint64(file.Offset(pos))
+	info.needed = append(info.needed, offset)
+
+	return index, offset
+}
+
+// pushDecl adds obj to the declaration work queue, if not already present.
+func (p *iexporter) pushDecl(obj types.Object) {
+	// Package unsafe is known to the compiler and predeclared.
+	// Caller should not ask us to export it.
+	if obj.Pkg() == types.Unsafe {
+		panic("cannot export package unsafe")
+	}
+
+	// Shallow export data: don't index decls from other packages.
+	if p.shallow && obj.Pkg() != p.localpkg {
+		return
+	}
+
+	if _, ok := p.declIndex[obj]; ok {
+		return
+	}
+
+	p.declIndex[obj] = ^uint64(0) // mark obj present in work queue
+	p.declTodo.pushTail(obj)
+}
+
+// exportWriter handles writing out individual data section chunks.
+type exportWriter struct {
+	p *iexporter
+
+	data       intWriter
+	prevFile   string
+	prevLine   int64
+	prevColumn int64
+}
+
+func (w *exportWriter) exportPath(pkg *types.Package) string {
+	if pkg == w.p.localpkg {
+		return ""
+	}
+	return pkg.Path()
+}
+
+func (p *iexporter) doDecl(obj types.Object) {
+	if trace {
+		p.trace("exporting decl %v (%T)", obj, obj)
+		p.indent++
+		defer func() {
+			p.indent--
+			p.trace("=> %s", obj)
+		}()
+	}
+	w := p.newWriter()
+
+	switch obj := obj.(type) {
+	case *types.Var:
+		w.tag(varTag)
+		w.pos(obj.Pos())
+		w.typ(obj.Type(), obj.Pkg())
+
+	case *types.Func:
+		sig, _ := obj.Type().(*types.Signature)
+		if sig.Recv() != nil {
+			// We shouldn't see methods in the package scope,
+			// but the type checker may repair "func () F() {}"
+			// to "func (Invalid) F()" and then treat it like "func F()",
+			// so allow that. See golang/go#57729.
+			if sig.Recv().Type() != types.Typ[types.Invalid] {
+				panic(internalErrorf("unexpected method: %v", sig))
+			}
+		}
+
+		// Function.
+		if sig.TypeParams().Len() == 0 {
+			w.tag(funcTag)
+		} else {
+			w.tag(genericFuncTag)
+		}
+		w.pos(obj.Pos())
+		// The tparam list of the function type is the declaration of the type
+		// params. So, write out the type params right now. Then those type params
+		// will be referenced via their type offset (via typOff) in all other
+		// places in the signature and function where they are used.
+		//
+		// While importing the type parameters, tparamList computes and records
+		// their export name, so that it can be later used when writing the index.
+ if tparams := sig.TypeParams(); tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + w.signature(sig) + + case *types.Const: + w.tag(constTag) + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + t := obj.Type() + + if tparam, ok := types.Unalias(t).(*types.TypeParam); ok { + w.tag(typeParamTag) + w.pos(obj.Pos()) + constraint := tparam.Constraint() + if p.version >= iexportVersionGo1_18 { + implicit := false + if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil { + implicit = iface.IsImplicit() + } + w.bool(implicit) + } + w.typ(constraint, obj.Pkg()) + break + } + + if obj.IsAlias() { + alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled + + var tparams *types.TypeParamList + if materialized { + tparams = aliases.TypeParams(alias) + } + if tparams.Len() == 0 { + w.tag(aliasTag) + } else { + w.tag(genericAliasTag) + } + w.pos(obj.Pos()) + if tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + if materialized { + // Preserve materialized aliases, + // even of non-exported types. + t = aliases.Rhs(alias) + } + w.typ(t, obj.Pkg()) + break + } + + // Defined type. + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + if named.TypeParams().Len() == 0 { + w.tag(typeTag) + } else { + w.tag(genericTypeTag) + } + w.pos(obj.Pos()) + + if named.TypeParams().Len() > 0 { + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. + w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) + } + + underlying := named.Underlying() + w.typ(underlying, obj.Pkg()) + + if types.IsInterface(t) { + break + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := range n { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + + // Receiver type parameters are type arguments of the receiver type, so + // their name must be qualified before exporting recv. + if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { + prefix := obj.Name() + "." + m.Name() + for i := 0; i < rparams.Len(); i++ { + rparam := rparams.At(i) + name := tparamExportName(prefix, rparam) + w.p.tparamNames[rparam.Obj()] = name + } + } + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + if w.p.shallow { + w.posV2(pos) + } else if w.p.version >= iexportVersionPosCol { + w.posV1(pos) + } else { + w.posV0(pos) + } +} + +// posV2 encoding (used only in shallow mode) records positions as +// (file, offset), where file is the index in the token.File table +// (which records the file name and newline offsets) and offset is a +// byte offset. It effectively ignores //line directives. 
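+// A zero file value encodes token.NoPos; otherwise the file index is
+// written as 1+index so that 0 remains reserved for NoPos.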
+func (w *exportWriter) posV2(pos token.Pos) { + if pos == token.NoPos { + w.uint64(0) + return + } + file := w.p.fset.File(pos) // fset must be non-nil + index, offset := w.p.fileIndexAndOffset(file, pos) + w.uint64(1 + index) + w.uint64(offset) +} + +func (w *exportWriter) posV1(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + column := int64(p.Column) + + deltaColumn := (column - w.prevColumn) << 1 + deltaLine := (line - w.prevLine) << 1 + + if file != w.prevFile { + deltaLine |= 1 + } + if deltaLine != 0 { + deltaColumn |= 1 + } + + w.int64(deltaColumn) + if deltaColumn&1 != 0 { + w.int64(deltaLine) + if deltaLine&1 != 0 { + w.string(file) + } + } + + w.prevFile = file + w.prevLine = line + w.prevColumn = column +} + +func (w *exportWriter) posV0(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(w.exportPath(pkg)) +} + +func (w *exportWriter) qualifiedType(obj *types.TypeName) { + name := w.p.exportName(obj) + + // Ensure any referenced declarations are written out too. + w.p.pushDecl(obj) + w.string(name) + w.pkg(obj.Pkg()) +} + +// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass +// it in explicitly into signatures and structs that may use it for +// constructing fields. +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + if trace { + w.p.trace("exporting type %s (%T)", t, t) + w.p.indent++ + defer func() { + w.p.indent-- + w.p.trace("=> %s", t) + }() + } + switch t := t.(type) { + case *types.Alias: + if targs := aliases.TypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(aliases.Origin(t), pkg) + return + } + w.startType(aliasType) + w.qualifiedType(t.Obj()) + + case *types.Named: + if targs := t.TypeArgs(); targs.Len() > 0 { + w.startType(instanceType) + // TODO(rfindley): investigate if this position is correct, and if it + // matters. 
+ w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(t.Origin(), pkg) + return + } + w.startType(definedType) + w.qualifiedType(t.Obj()) + + case *types.TypeParam: + w.startType(typeParamType) + w.qualifiedType(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.pkg(pkg) + w.signature(t) + + case *types.Struct: + w.startType(structType) + n := t.NumFields() + // Even for struct{} we must emit some qualifying package, because that's + // what the compiler does, and thus that's what the importer expects. + fieldPkg := pkg + if n > 0 { + fieldPkg = t.Field(0).Pkg() + } + if fieldPkg == nil { + // TODO(rfindley): improve this very hacky logic. + // + // The importer expects a package to be set for all struct types, even + // those with no fields. A better encoding might be to set NumFields + // before pkg. setPkg panics with a nil package, which may be possible + // to reach with invalid packages (and perhaps valid packages, too?), so + // (arbitrarily) set the localpkg if available. + // + // Alternatively, we may be able to simply guarantee that pkg != nil, by + // reconsidering the encoding of constant values. + if w.p.shallow { + fieldPkg = w.p.localpkg + } else { + panic(internalErrorf("no package to set for empty struct")) + } + } + w.pkg(fieldPkg) + w.uint64(uint64(n)) + + for i := range n { + f := t.Field(i) + if w.p.shallow { + w.objectPath(f) + } + w.pos(f.Pos()) + w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg + w.typ(f.Type(), fieldPkg) + w.bool(f.Anonymous()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.pkg(pkg) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + ft := t.EmbeddedType(i) + tPkg := pkg + if named, _ := types.Unalias(ft).(*types.Named); named != nil { + w.pos(named.Obj().Pos()) + } else { + w.pos(token.NoPos) + } + w.typ(ft, tPkg) + } + + // See comment for struct fields. In shallow mode we change the encoding + // for interface methods that are promoted from other packages. + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + if w.p.shallow { + w.objectPath(m) + } + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + case *types.Union: + w.startType(unionType) + nt := t.Len() + w.uint64(uint64(nt)) + for i := range nt { + term := t.Term(i) + w.bool(term.Tilde()) + w.typ(term.Type(), pkg) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +// objectPath writes the package and objectPath to use to look up obj in a +// different package, when encoding in "shallow" mode. +// +// When doing a shallow import, the importer creates only the local package, +// and requests package symbols for dependencies from the client. 
+// However, certain types defined in the local package may hold objects defined +// (perhaps deeply) within another package. +// +// For example, consider the following: +// +// package a +// func F() chan * map[string] struct { X int } +// +// package b +// import "a" +// var B = a.F() +// +// In this example, the type of b.B holds fields defined in package a. +// In order to have the correct canonical objects for the field defined in the +// type of B, they are encoded as objectPaths and later looked up in the +// importer. The same problem applies to interface methods. +func (w *exportWriter) objectPath(obj types.Object) { + if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg { + // obj.Pkg() may be nil for the builtin error.Error. + // In this case, or if obj is declared in the local package, no need to + // encode. + w.string("") + return + } + objectPath, err := w.p.objectpathEncoder().For(obj) + if err != nil { + // Fall back to the empty string, which will cause the importer to create a + // new object, which matches earlier behavior. Creating a new object is + // sufficient for many purposes (such as type checking), but causes certain + // references algorithms to fail (golang/go#60819). However, we didn't + // notice this problem during months of gopls@v0.12.0 testing. + // + // TODO(golang/go#61674): this workaround is insufficient, as in the case + // where the field forwarded from an instantiated type that may not appear + // in the export data of the original package: + // + // // package a + // type A[P any] struct{ F P } + // + // // package b + // type B a.A[int] + // + // We need to update references algorithms not to depend on this + // de-duplication, at which point we may want to simply remove the + // workaround here. + w.string("") + return + } + w.string(string(objectPath)) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { + w.uint64(uint64(ts.Len())) + for i := 0; i < ts.Len(); i++ { + w.typ(ts.At(i), pkg) + } +} + +func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { + ll := uint64(list.Len()) + w.uint64(ll) + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + // Set the type parameter exportName before exporting its type. + exportName := tparamExportName(prefix, tparam) + w.p.tparamNames[tparam.Obj()] = exportName + w.typ(list.At(i), pkg) + } +} + +const blankMarker = "$" + +// tparamExportName returns the 'exported' name of a type parameter, which +// differs from its actual object name: it is prefixed with a qualifier, and +// blank type parameter names are disambiguated by their index in the type +// parameter list. +func tparamExportName(prefix string, tparam *types.TypeParam) string { + assert(prefix != "") + name := tparam.Obj().Name() + if name == "_" { + name = blankMarker + strconv.Itoa(tparam.Index()) + } + return prefix + "." + name +} + +// tparamName returns the real name of a type parameter, after stripping its +// qualifying prefix and reverting blank-name encoding. See tparamExportName +// for details. +func tparamName(exportName string) string { + // Remove the "path" from the type param name that makes it unique. 
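+	// For example, an exported name "Pair.K" yields "K", and a blank
+	// parameter exported as "Pair.$0" is restored to "_".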
+ ix := strings.LastIndex(exportName, ".") + if ix < 0 { + errorf("malformed type parameter export name %s: missing prefix", exportName) + } + name := exportName[ix+1:] + if strings.HasPrefix(name, blankMarker) { + return "_" + } + return name +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := range n { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + if w.p.version >= iexportVersionGo1_18 { + w.int64(int64(v.Kind())) + } + + if v.Kind() == constant.Unknown { + // golang/go#60605: treat unknown constant values as if they have invalid type + // + // This loses some fidelity over the package type-checked from source, but that + // is acceptable. + // + // TODO(rfindley): we should switch on the recorded constant kind rather + // than the constant type + return + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + w.bool(constant.BoolVal(v)) + case types.IsInteger: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case types.IsFloat: + f := constantToFloat(v) + w.mpfloat(f, typ) + case types.IsComplex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), typ) + case types.IsString: + w.string(constant.StringVal(v)) + default: + if b.Kind() == types.Invalid { + // package contains type errors + break + } + panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying())) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + x = constant.ToFloat(x) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. 
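+//
+// For example, with a 2-byte unsigned type (maxBytes == 2), the values
+// 0 through 253 are written directly as a single byte, while 300 (0x012C)
+// is written as the prefix byte 254 followed by 0x01 0x2C.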
+//
+// Encoding for signed types uses the same general approach as for
+// unsigned types, except small values use zig-zag encoding and the
+// bottom bit of the length-prefix byte for large values is reserved as a
+// sign bit.
+//
+// The exact boundary between small and large encodings varies
+// according to the maximum number of bytes needed to encode a value
+// of type typ. As a special case, 8-bit types are always encoded as a
+// single byte.
+//
+// TODO(mdempsky): Is this level of complexity really worthwhile?
+func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
+	basic, ok := typ.Underlying().(*types.Basic)
+	if !ok {
+		panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
+	}
+
+	signed, maxBytes := intSize(basic)
+
+	negative := x.Sign() < 0
+	if !signed && negative {
+		panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
+	}
+
+	b := x.Bytes()
+	if len(b) > 0 && b[0] == 0 {
+		panic(internalErrorf("leading zeros"))
+	}
+	if uint(len(b)) > maxBytes {
+		panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
+	}
+
+	maxSmall := 256 - maxBytes
+	if signed {
+		maxSmall = 256 - 2*maxBytes
+	}
+	if maxBytes == 1 {
+		maxSmall = 256
+	}
+
+	// Check if x can use small value encoding.
+	if len(b) <= 1 {
+		var ux uint
+		if len(b) == 1 {
+			ux = uint(b[0])
+		}
+		if signed {
+			ux <<= 1
+			if negative {
+				ux--
+			}
+		}
+		if ux < maxSmall {
+			w.data.WriteByte(byte(ux))
+			return
+		}
+	}
+
+	n := 256 - uint(len(b))
+	if signed {
+		n = 256 - 2*uint(len(b))
+		if negative {
+			n |= 1
+		}
+	}
+	if n < maxSmall || n >= 256 {
+		panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
+	}
+
+	w.data.WriteByte(byte(n))
+	w.data.Write(b)
+}
+
+// mpfloat exports a multi-precision floating point number.
+//
+// The number's value is decomposed into mantissa × 2**exponent, where
+// mantissa is an integer. The value is written out as mantissa (as a
+// multi-precision integer) and then the exponent, except exponent is
+// omitted if mantissa is zero.
+func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
+	if f.IsInf() {
+		panic("infinite constant")
+	}
+
+	// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
+	var mant big.Float
+	exp := int64(f.MantExp(&mant))
+
+	// Scale so that mant is an integer.
+	prec := mant.MinPrec()
+	mant.SetMantExp(&mant, int(prec))
+	exp -= int64(prec)
+
+	manti, acc := mant.Int(nil)
+	if acc != big.Exact {
+		panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
+	}
+	w.mpint(manti, typ)
+	if manti.Sign() != 0 {
+		w.int64(exp)
+	}
+}
+
+func (w *exportWriter) bool(b bool) bool {
+	var x uint64
+	if b {
+		x = 1
+	}
+	w.uint64(x)
+	return b
+}
+
+func (w *exportWriter) int64(x int64)   { w.data.int64(x) }
+func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
+func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
+
+func (w *exportWriter) localIdent(obj types.Object) {
+	// Anonymous parameters.
+ if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. +func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +// TODO(adonovan): make this call panic, so that it's symmetric with errorf. +// Otherwise it's easy to forget to do anything with the error. +// +// TODO(adonovan): also, consider switching the names "errorf" and +// "internalErrorf" as the former is used for bugs, whose cause is +// internal inconsistency, whereas the latter is used for ordinary +// situations like bad input, whose cause is external. +func internalErrorf(format string, args ...any) error { + return internalError(fmt.Sprintf(format, args...)) +} diff --git a/openshift/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/openshift/vendor/golang.org/x/tools/internal/gcimporter/iimport.go new file mode 100644 index 0000000000..82e6c9d2dc --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -0,0 +1,1120 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See iexport.go for the export data format. 
+
+package gcimporter
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"go/constant"
+	"go/token"
+	"go/types"
+	"io"
+	"math/big"
+	"slices"
+	"sort"
+	"strings"
+
+	"golang.org/x/tools/go/types/objectpath"
+	"golang.org/x/tools/internal/aliases"
+	"golang.org/x/tools/internal/typesinternal"
+)
+
+type intReader struct {
+	*bytes.Reader
+	path string
+}
+
+func (r *intReader) int64() int64 {
+	i, err := binary.ReadVarint(r.Reader)
+	if err != nil {
+		errorf("import %q: read varint error: %v", r.path, err)
+	}
+	return i
+}
+
+func (r *intReader) uint64() uint64 {
+	i, err := binary.ReadUvarint(r.Reader)
+	if err != nil {
+		errorf("import %q: read varint error: %v", r.path, err)
+	}
+	return i
+}
+
+// Keep this in sync with constants in iexport.go.
+const (
+	iexportVersionGo1_11   = 0
+	iexportVersionPosCol   = 1
+	iexportVersionGo1_18   = 2
+	iexportVersionGenerics = 2
+	iexportVersion         = iexportVersionGenerics
+
+	iexportVersionCurrent = 2
+)
+
+type ident struct {
+	pkg  *types.Package
+	name string
+}
+
+const predeclReserved = 32
+
+type itag uint64
+
+const (
+	// Types
+	definedType itag = iota
+	pointerType
+	sliceType
+	arrayType
+	chanType
+	mapType
+	signatureType
+	structType
+	interfaceType
+	typeParamType
+	instanceType
+	unionType
+	aliasType
+)
+
+// Object tags
+const (
+	varTag          = 'V'
+	funcTag         = 'F'
+	genericFuncTag  = 'G'
+	constTag        = 'C'
+	aliasTag        = 'A'
+	genericAliasTag = 'B'
+	typeParamTag    = 'P'
+	typeTag         = 'T'
+	genericTypeTag  = 'U'
+)
+
+// IImportData imports a package from the serialized package data
+// and returns 0 and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
+	pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil)
+	if err != nil {
+		return 0, nil, err
+	}
+	return 0, pkgs[0], nil
+}
+
+// IImportBundle imports a set of packages from the serialized package bundle.
+func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
+	return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil)
+}
+
+// A GetPackagesFunc function obtains the non-nil symbols for a set of
+// packages, creating and recursively importing them as needed. An
+// implementation should store each package symbol in the Pkg
+// field of the items array.
+//
+// Any error causes importing to fail. This can be used to quickly read
+// the import manifest of an export data file without fully decoding it.
+type GetPackagesFunc = func(items []GetPackagesItem) error
+
+// A GetPackagesItem is a request from the importer for the package
+// symbol of the specified name and path.
+type GetPackagesItem struct {
+	Name, Path string
+	Pkg        *types.Package // to be filled in by GetPackagesFunc call
+
+	// private importer state
+	pathOffset uint64
+	nameIndex  map[string]uint64
+}
+
+// GetPackagesFromMap returns a GetPackagesFunc that retrieves
+// packages from the given map of package path to package.
+//
+// The returned function may mutate m: each requested package that is not
+// found is created with types.NewPackage and inserted into m.
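+//
+// (This is how IImportData and IImportBundle above adapt their imports
+// map to the GetPackagesFunc interface.)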
+func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { + return func(items []GetPackagesItem) error { + for i, item := range items { + pkg, ok := m[item.Path] + if !ok { + pkg = types.NewPackage(item.Path, item.Name) + m[item.Path] = pkg + } + items[i].Pkg = pkg + } + return nil + } +} + +func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) { + const currentVersion = iexportVersionCurrent + version := int64(-1) + if !debug { + defer func() { + if e := recover(); e != nil { + if bundle { + err = fmt.Errorf("%v", e) + } else if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) + } + } + }() + } + + r := &intReader{bytes.NewReader(data), path} + + if bundle { + if v := r.uint64(); v != bundleVersion { + errorf("unknown bundle format version %d", v) + } + } + + version = int64(r.uint64()) + switch version { + case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11: + default: + if version > iexportVersionGo1_18 { + errorf("unstable iexport format version %d, just rebuild compiler and std library", version) + } else { + errorf("unknown iexport format version %d", version) + } + } + + sLen := int64(r.uint64()) + var fLen int64 + var fileOffset []uint64 + if shallow { + // Shallow mode uses a different position encoding. + fLen = int64(r.uint64()) + fileOffset = make([]uint64, r.uint64()) + for i := range fileOffset { + fileOffset[i] = r.uint64() + } + } + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + fileData := data[whence+sLen : whence+sLen+fLen] + declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen] + r.Seek(sLen+fLen+dLen, io.SeekCurrent) + + p := iimporter{ + version: int(version), + ipath: path, + aliases: aliases.Enabled(), + shallow: shallow, + reportf: reportf, + + stringData: stringData, + stringCache: make(map[uint64]string), + fileOffset: fileOffset, + fileData: fileData, + fileCache: make([]*token.File, len(fileOffset)), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + // Separate map for typeparams, keyed by their package and unique + // name. + tparamIndex: make(map[ident]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + } + defer p.fake.setLines() // set lines for files in fset + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + // Gather the relevant packages from the manifest. + items := make([]GetPackagesItem, r.uint64()) + uniquePkgPaths := make(map[string]bool) + for i := range items { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + items[i].Name = pkgName + items[i].Path = pkgPath + items[i].pathOffset = pkgPathOff + + // Read index for package. + nameIndex := make(map[string]uint64) + nSyms := r.uint64() + // In shallow mode, only the current package (i=0) has an index. 
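+		// (Shallow export data does not index declarations from other
+		// packages; their symbols are obtained via the getPackages callback.)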
+ assert(!(shallow && i > 0 && nSyms != 0)) + for ; nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + items[i].nameIndex = nameIndex + + uniquePkgPaths[pkgPath] = true + } + // Debugging #63822; hypothesis: there are duplicate PkgPaths. + if len(uniquePkgPaths) != len(items) { + reportf("found duplicate PkgPaths while reading export data manifest: %v", items) + } + + // Request packages all at once from the client, + // enabling a parallel implementation. + if err := getPackages(items); err != nil { + return nil, err // don't wrap this error + } + + // Check the results and complete the index. + pkgList := make([]*types.Package, len(items)) + for i, item := range items { + pkg := item.Pkg + if pkg == nil { + errorf("internal error: getPackages returned nil package for %q", item.Path) + } else if pkg.Path() != item.Path { + errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path) + } else if pkg.Name() != item.Name { + errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name) + } + p.pkgCache[item.pathOffset] = pkg + p.pkgIndex[pkg] = item.nameIndex + pkgList[i] = pkg + } + + if bundle { + pkgs = make([]*types.Package, r.uint64()) + for i := range pkgs { + pkg := p.pkgAt(r.uint64()) + imps := make([]*types.Package, r.uint64()) + for j := range imps { + imps[j] = p.pkgAt(r.uint64()) + } + pkg.SetImports(imps) + pkgs[i] = pkg + } + } else { + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } + pkgs = pkgList[:1] + + // record all referenced packages as imports + list := slices.Clone(pkgList[1:]) + sort.Sort(byPath(list)) + pkgs[0].SetImports(list) + } + + for _, pkg := range pkgs { + if pkg.Complete() { + continue + } + + names := make([]string, 0, len(p.pkgIndex[pkg])) + for name := range p.pkgIndex[pkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(pkg, name) + } + + // package was imported completely and without errors + pkg.MarkComplete() + } + + // SetConstraint can't be called if the constraint type is not yet complete. + // When type params are created in the typeParamTag case of (*importReader).obj(), + // the associated constraint type may not be complete due to recursion. + // Therefore, we defer calling SetConstraint there, and call it here instead + // after all types are complete. + for _, d := range p.later { + d.t.SetConstraint(d.constraint) + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + // Workaround for golang/go#61561. See the doc for instanceList for details. 
+ for _, typ := range p.instanceList { + if iface, _ := typ.Underlying().(*types.Interface); iface != nil { + iface.Complete() + } + } + + return pkgs, nil +} + +type setConstraintArgs struct { + t *types.TypeParam + constraint types.Type +} + +type iimporter struct { + version int + ipath string + + aliases bool + shallow bool + reportf ReportFunc // if non-nil, used to report bugs + + stringData []byte + stringCache map[uint64]string + fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i + fileData []byte + fileCache []*token.File // memoized decoding of file encoded as i + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + tparamIndex map[ident]types.Type + + fake fakeFileSet + interfaceList []*types.Interface + + // Workaround for the go/types bug golang/go#61561: instances produced during + // instantiation may contain incomplete interfaces. Here we only complete the + // underlying type of the instance, which is the most common case but doesn't + // handle parameterized interface literals defined deeper in the type. + instanceList []types.Type // instances for later completion (see golang/go#61561) + + // Arguments for calls to SetConstraint that are deferred due to recursive types + later []setConstraintArgs + + indent int // for tracing support +} + +func (p *iimporter) trace(format string, args ...any) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + if debug { + p.trace("import decl %s", name) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", name) + }() + } + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + // In deep mode, the index should be complete. In shallow + // mode, we should have already recursively loaded necessary + // dependencies so the above Lookup succeeds. + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) fileAt(index uint64) *token.File { + file := p.fileCache[index] + if file == nil { + off := p.fileOffset[index] + file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath}) + p.fileCache[index] = file + } + return file +} + +func (p *iimporter) decodeFile(rd intReader) *token.File { + filename := p.stringAt(rd.uint64()) + size := int(rd.uint64()) + file := p.fake.fset.AddFile(filename, -1, size) + + // SetLines requires a nondecreasing sequence. + // Because it is common for clients to derive the interval + // [start, start+len(name)] from a start position, and we + // want to ensure that the end offset is on the same line, + // we fill in the gaps of the sparse encoding with values + // that strictly increase by the largest possible amount. 
+ // This allows us to avoid having to record the actual end + // offset of each needed line. + + lines := make([]int, int(rd.uint64())) + var index, offset int + for i, n := 0, int(rd.uint64()); i < n; i++ { + index += int(rd.uint64()) + offset += int(rd.uint64()) + lines[index] = offset + + // Ensure monotonicity between points. + for j := index - 1; j > 0 && lines[j] == 0; j-- { + lines[j] = lines[j+1] - 1 + } + } + + // Ensure monotonicity after last point. + for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- { + size-- + lines[j] = size + } + + if !file.SetLines(lines) { + errorf("SetLines failed: %d", lines) // can't happen + } + return file +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && canReuse(base, t) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if canReuse(base, t) { + p.typCache[off] = t + } + return t +} + +// canReuse reports whether the type rhs on the RHS of the declaration for def +// may be re-used. +// +// Specifically, if def is non-nil and rhs is an interface type with methods, it +// may not be re-used because we have a convention of setting the receiver type +// for interface methods to def. +func canReuse(def *types.Named, rhs types.Type) bool { + if def == nil { + return true + } + iface, _ := types.Unalias(rhs).(*types.Interface) + if iface == nil { + return true + } + // Don't use iface.Empty() here as iface may not be complete. + return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0 +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +// markBlack is redefined in iimport_go123.go, to work around golang/go#69912. +// +// If TypeNames are not marked black (in the sense of go/types cycle +// detection), they may be mutated when dot-imported. Fix this by punching a +// hole through the type, when compiling with Go 1.23. (The bug has been fixed +// for 1.24, but the fix was not worth back-porting). +var markBlack = func(name *types.TypeName) {} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case aliasTag, genericAliasTag: + var tparams []*types.TypeParam + if tag == genericAliasTag { + tparams = r.tparamList() + } + typ := r.typ() + obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + markBlack(obj) // workaround for golang/go#69912 + r.declare(obj) + + case constTag: + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case funcTag, genericFuncTag: + var tparams []*types.TypeParam + if tag == genericFuncTag { + tparams = r.tparamList() + } + sig := r.signature(nil, nil, tparams) + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case typeTag, genericTypeTag: + // Types can be recursive. We need to setup a stub + // declaration before recursing. 
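+ // For example, a self-referential declaration such as
+ //
+ //	type List struct{ Next *List }
+ //
+ // requires the named type to exist (with a nil underlying type)
+ // before its right-hand side is decoded.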
+ obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + + markBlack(obj) // workaround for golang/go#69912 + + // Declare obj before calling r.tparamList, so the new type name is recognized + // if used in the constraint of one of its own typeparams (see #48280). + r.declare(obj) + if tag == genericTypeTag { + tparams := r.tparamList() + named.SetTypeParams(tparams) + } + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + + // If the receiver has any targs, set those as the + // rparams of the method (since those are the + // typeparams being used in the method sig/body). + _, recvNamed := typesinternal.ReceiverNamed(recv) + targs := recvNamed.TypeArgs() + var rparams []*types.TypeParam + if targs.Len() > 0 { + rparams = make([]*types.TypeParam, targs.Len()) + for i := range rparams { + rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) + } + } + msig := r.signature(recv, rparams, nil) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case typeParamTag: + // We need to "declare" a typeparam in order to have a name that + // can be referenced recursively (if needed) in the type param's + // bound. + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + name0 := tparamName(name) + tn := types.NewTypeName(pos, r.currPkg, name0, nil) + t := types.NewTypeParam(tn, nil) + + // To handle recursive references to the typeparam within its + // bound, save the partial type in tparamIndex before reading the bounds. + id := ident{r.currPkg, name} + r.p.tparamIndex[id] = t + var implicit bool + if r.p.version >= iexportVersionGo1_18 { + implicit = r.bool() + } + constraint := r.typ() + if implicit { + iface, _ := types.Unalias(constraint).(*types.Interface) + if iface == nil { + errorf("non-interface constraint marked implicit") + } + iface.MarkImplicit() + } + // The constraint type may not be complete, if we + // are in the middle of a type recursion involving type + // constraints. So, we defer SetConstraint until we have + // completely set up all types in ImportData. + r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) + + case varTag: + typ := r.typ() + + v := types.NewVar(pos, r.currPkg, name, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + r.declare(v) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + if r.p.version >= iexportVersionGo1_18 { + // TODO: add support for using the kind. 
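+ // For now the encoded kind is read below and discarded.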
+ _ = constant.Kind(r.int64()) + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + x.SetInt64(v) + return + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) + if signed && n&1 != 0 { + x.Neg(x) + } +} + +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) + } + return constant.Make(&f) +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + if r.p.shallow { + // precise offsets are encoded only in shallow mode + return r.posv2() + } + if r.p.version >= iexportVersionPosCol { + r.posv1() + } else { + r.posv0() + } + + if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 { + return token.NoPos + } + return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevFile = r.string() + } + } +} + +func (r *importReader) posv2() token.Pos { + file := r.uint64() + if file == 0 { + return token.NoPos + } + tf := r.p.fileAt(file - 1) + return tf.Pos(int(r.uint64())) +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := types.Unalias(t).(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } 
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) (res types.Type) { + k := r.kind() + if debug { + r.p.trace("importing type %d (base: %v)", k, base) + r.p.indent++ + defer func() { + r.p.indent-- + r.p.trace("=> %s", res) + }() + } + switch k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case aliasType, definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil, nil, nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + var field *types.Var + if r.p.shallow { + field, _ = r.objectPathObject().(*types.Var) + } + + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + // Either this is not a shallow import, the field is local, or the + // encoded objectPath failed to produce an object (a bug). + // + // Even in this last, buggy case, fall back on creating a new field. As + // discussed in iexport.go, this is not correct, but mostly works and is + // preferable to failing (for now at least). + if field == nil { + field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + } + + fields[i] = field + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + var method *types.Func + if r.p.shallow { + method, _ = r.objectPathObject().(*types.Func) + } + + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. + var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + msig := r.signature(recv, nil, nil) + + if method == nil { + method = types.NewFunc(mpos, r.currPkg, mname, msig) + } + methods[i] = method + } + + typ := types.NewInterfaceType(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + + case typeParamType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + pkg, name := r.qualifiedIdent() + id := ident{pkg, name} + if t, ok := r.p.tparamIndex[id]; ok { + // We're already in the process of importing this typeparam. + return t + } + // Otherwise, import the definition of the typeparam now. + r.p.doDecl(pkg, name) + return r.p.tparamIndex[id] + + case instanceType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + // pos does not matter for instances: they are positioned on the original + // type. + _ = r.pos() + len := r.uint64() + targs := make([]types.Type, len) + for i := range targs { + targs[i] = r.typ() + } + baseType := r.typ() + // The imported instantiated type doesn't include any methods, so + // we must always use the methods of the base (orig) type. 
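+ // Instantiate is called with validate=false: the export data is
+ // assumed to be well formed, so the type arguments are not
+ // re-checked against their constraints here.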
+ // TODO provide a non-nil *Environment + t, _ := types.Instantiate(nil, baseType, targs, false) + + // Workaround for golang/go#61561. See the doc for instanceList for details. + r.p.instanceList = append(r.p.instanceList, t) + return t + + case unionType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + terms := make([]*types.Term, r.uint64()) + for i := range terms { + terms[i] = types.NewTerm(r.bool(), r.typ()) + } + return types.NewUnion(terms) + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +// objectPathObject is the inverse of exportWriter.objectPath. +// +// In shallow mode, certain fields and methods may need to be looked up in an +// imported package. See the doc for exportWriter.objectPath for a full +// explanation. +func (r *importReader) objectPathObject() types.Object { + objPath := objectpath.Path(r.string()) + if objPath == "" { + return nil + } + pkg := r.pkg() + obj, err := objectpath.Object(pkg, objPath) + if err != nil { + if r.p.reportf != nil { + r.p.reportf("failed to find object for objectPath %q: %v", objPath, err) + } + } + return obj +} + +func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) +} + +func (r *importReader) tparamList() []*types.TypeParam { + n := r.uint64() + if n == 0 { + return nil + } + xs := make([]*types.TypeParam, n) + for i := range xs { + // Note: the standard library importer is tolerant of nil types here, + // though would panic in SetTypeParams. + xs[i] = types.Unalias(r.typ()).(*types.TypeParam) + } + return xs +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) +} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/openshift/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/openshift/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go new file mode 100644 index 0000000000..7586bfaca6 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go @@ -0,0 +1,53 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 && !go1.24 + +package gcimporter + +import ( + "go/token" + "go/types" + "unsafe" +) + +// TODO(rfindley): delete this workaround once go1.24 is assured. 
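+//
+// The workaround mirrors the memory layout of types.TypeName so that the
+// type checker's cycle-detection color field can be set directly through
+// unsafe; the compile-time array-size assertion in init guards against
+// layout changes between releases.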
+ +func init() { + // Update markBlack so that it correctly sets the color + // of imported TypeNames. + // + // See the doc comment for markBlack for details. + + type color uint32 + const ( + white color = iota + black + grey + ) + type object struct { + _ *types.Scope + _ token.Pos + _ *types.Package + _ string + _ types.Type + _ uint32 + color_ color + _ token.Pos + } + type typeName struct { + object + } + + // If the size of types.TypeName changes, this will fail to compile. + const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) + var _ [-delta * delta]int + + markBlack = func(obj *types.TypeName) { + type uP = unsafe.Pointer + var ptr *typeName + *(*uP)(uP(&ptr)) = uP(obj) + ptr.color_ = black + } +} diff --git a/openshift/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/openshift/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go new file mode 100644 index 0000000000..907c8557a5 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go @@ -0,0 +1,91 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "go/types" + "sync" +) + +// predecl is a cache for the predeclared types in types.Universe. +// +// Cache a distinct result based on the runtime value of any. +// The pointer value of the any type varies based on GODEBUG settings. +var predeclMu sync.Mutex +var predecl map[types.Type][]types.Type + +func predeclared() []types.Type { + anyt := types.Universe.Lookup("any").Type() + + predeclMu.Lock() + defer predeclMu.Unlock() + + if pre, ok := predecl[anyt]; ok { + return pre + } + + if predecl == nil { + predecl = make(map[types.Type][]types.Type) + } + + decls := []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + anyt, + } + + predecl[anyt] = decls + return decls +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/openshift/vendor/golang.org/x/tools/internal/gcimporter/support.go b/openshift/vendor/golang.org/x/tools/internal/gcimporter/support.go new file mode 100644 index 0000000000..4af810dc41 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/gcimporter/support.go @@ -0,0 +1,30 @@ +// Copyright 2024 The Go 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "bufio" + "io" + "strconv" + "strings" +) + +// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader. +func readArchiveHeader(b *bufio.Reader, name string) int { + // architecture-independent object file output + const HeaderSize = 60 + + var buf [HeaderSize]byte + if _, err := io.ReadFull(b, buf[:]); err != nil { + return -1 + } + aname := strings.Trim(string(buf[0:16]), " ") + if !strings.HasPrefix(aname, name) { + return -1 + } + asize := strings.Trim(string(buf[48:58]), " ") + i, _ := strconv.Atoi(asize) + return i +} diff --git a/openshift/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/openshift/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go new file mode 100644 index 0000000000..37b4a39e9e --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -0,0 +1,761 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Derived from go/internal/gcimporter/ureader.go + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" + "sort" + + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/pkgbits" + "golang.org/x/tools/internal/typesinternal" +) + +// A pkgReader holds the shared state for reading a unified IR package +// description. +type pkgReader struct { + pkgbits.PkgDecoder + + fake fakeFileSet + + ctxt *types.Context + imports map[string]*types.Package // previously imported packages, indexed by path + aliases bool // create types.Alias nodes + + // lazily initialized arrays corresponding to the unified IR + // PosBase, Pkg, and Type sections, respectively. + posBases []string // position bases (i.e., file names) + pkgs []*types.Package + typs []types.Type + + // laterFns holds functions that need to be invoked at the end of + // import reading. + laterFns []func() + // laterFors is used in case of 'type A B' to ensure that B is processed before A. + laterFors map[types.Type]int + + // ifaces holds a list of constructed Interfaces, which need to have + // Complete called after importing is done. + ifaces []*types.Interface +} + +// later adds a function to be invoked at the end of import reading. +func (pr *pkgReader) later(fn func()) { + pr.laterFns = append(pr.laterFns, fn) +} + +// See cmd/compile/internal/noder.derivedInfo. +type derivedInfo struct { + idx pkgbits.Index +} + +// See cmd/compile/internal/noder.typeInfo. +type typeInfo struct { + idx pkgbits.Index + derived bool +} + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + if !debug { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) + } + }() + } + + s := string(data) + input := pkgbits.NewPkgDecoder(path, s) + pkg = readUnifiedPackage(fset, nil, imports, input) + return +} + +// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing. 
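+//
+// Recording the index allows a later declaration (e.g. A in "type A B")
+// to force B's deferred function to run first; see the use in objIdx.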
+func (pr *pkgReader) laterFor(t types.Type, fn func()) { + if pr.laterFors == nil { + pr.laterFors = make(map[types.Type]int) + } + pr.laterFors[t] = len(pr.laterFns) + pr.laterFns = append(pr.laterFns, fn) +} + +// readUnifiedPackage reads a package description from the given +// unified IR export data decoder. +func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { + pr := pkgReader{ + PkgDecoder: input, + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + + ctxt: ctxt, + imports: imports, + aliases: aliases.Enabled(), + + posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), + pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), + typs: make([]types.Type, input.NumElems(pkgbits.RelocType)), + } + defer pr.fake.setLines() + + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) + pkg := r.pkg() + if r.Version().Has(pkgbits.HasInit) { + r.Bool() + } + + for i, n := 0, r.Len(); i < n; i++ { + // As if r.obj(), but avoiding the Scope.Lookup call, + // to avoid eager loading of imports. + r.Sync(pkgbits.SyncObject) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } + r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + assert(r.Len() == 0) + } + + r.Sync(pkgbits.SyncEOF) + + for _, fn := range pr.laterFns { + fn() + } + + for _, iface := range pr.ifaces { + iface.Complete() + } + + // Imports() of pkg are all of the transitive packages that were loaded. + var imps []*types.Package + for _, imp := range pr.pkgs { + if imp != nil && imp != pkg { + imps = append(imps, imp) + } + } + sort.Sort(byPath(imps)) + pkg.SetImports(imps) + + pkg.MarkComplete() + return pkg +} + +// A reader holds the state for reading a single unified IR element +// within a package. +type reader struct { + pkgbits.Decoder + + p *pkgReader + + dict *readerDict +} + +// A readerDict holds the state for type parameters that parameterize +// the current unified IR element. +type readerDict struct { + // bounds is a slice of typeInfos corresponding to the underlying + // bounds of the element's type parameters. + bounds []typeInfo + + // tparams is a slice of the constructed TypeParams for the element. + tparams []*types.TypeParam + + // derived is a slice of types derived from tparams, which may be + // instantiated while reading the current element. + derived []derivedInfo + derivedTypes []types.Type // lazily instantiated from derived +} + +func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.NewDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.TempDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) retireReader(r *reader) { + pr.RetireDecoder(&r.Decoder) +} + +// @@@ Positions + +func (r *reader) pos() token.Pos { + r.Sync(pkgbits.SyncPos) + if !r.Bool() { + return token.NoPos + } + + // TODO(mdempsky): Delta encoding. 
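+ // Positions are currently encoded as absolute (base, line, column)
+ // triples rather than as deltas from the previous position.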
+ posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return r.p.fake.pos(posBase, int(line), int(col)) +} + +func (r *reader) posBase() string { + return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) +} + +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { + if b := pr.posBases[idx]; b != "" { + return b + } + + var filename string + { + r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + + // Within types2, position bases have a lot more details (e.g., + // keeping track of where //line directives appeared exactly). + // + // For go/types, we just track the file name. + + filename = r.String() + + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() + + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + pr.retireReader(r) + } + b := filename + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types.Package { + path := r.String() + switch path { + // cmd/compile emits path="main" for main packages because + // that's the linker symbol prefix it used; but we need + // the package's path as it would be reported by go list, + // hence "main" below. + // See test at go/packages.TestMainPackagePathInModeTypes. + case "", "main": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + + pkg := types.NewPackage(path, name) + r.p.imports[path] = pkg + + return pkg +} + +// @@@ Types + +func (r *reader) typ() types.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { + idx := info.idx + var where *types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + var typ types.Type + { + r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ = r.doTyp() + assert(typ != nil) + pr.retireReader(r) + } + // See comment in pkgReader.typIdx explaining how this happens. 
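+ // (Decoding above may have re-entered typIdx recursively and already
+ // stored a type at *where; if so, prefer that earlier value.)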
+ if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + errorf("unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types.TypeName) + if len(targs) != 0 { + t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types.ChanDir(r.Len()) + return types.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types.Struct { + fields := make([]*types.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types.Union { + terms := make([]*types.Term, r.Len()) + for i := range terms { + terms[i] = types.NewTerm(r.Bool(), r.typ()) + } + return types.NewUnion(terms) +} + +func (r *reader) interfaceType() *types.Interface { + methods := make([]*types.Func, r.Len()) + embeddeds := make([]types.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + + // We need to call iface.Complete(), but if there are any embedded + // defined types, then we may not have set their underlying + // interface type yet. So we need to defer calling Complete until + // after we've called SetUnderlying everywhere. + // + // TODO(mdempsky): After CL 424876 lands, it should be safe to call + // iface.Complete() immediately. + r.p.ifaces = append(r.p.ifaces, iface) + + return iface +} + +func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types.Tuple { + r.Sync(pkgbits.SyncParams) + + params := make([]*types.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + + return types.NewTuple(params...) 
+} + +func (r *reader) param() *types.Var { + r.Sync(pkgbits.SyncParam) + + pos := r.pos() + pkg, name := r.localIdent() + typ := r.typ() + + return types.NewParam(pos, pkg, name, typ) +} + +// @@@ Objects + +func (r *reader) obj() (types.Object, []types.Type) { + r.Sync(pkgbits.SyncObject) + + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } + + pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + obj := pkgScope(pkg).Lookup(name) + + targs := make([]types.Type, r.Len()) + for i := range targs { + targs[i] = r.typ() + } + + return obj, targs +} + +func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { + + var objPkg *types.Package + var objName string + var tag pkgbits.CodeObj + { + rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + + objPkg, objName = rname.qualifiedIdent() + assert(objName != "") + + tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + pr.retireReader(rname) + } + + if tag == pkgbits.ObjStub { + assert(objPkg == nil || objPkg == types.Unsafe) + return objPkg, objName + } + + // Ignore local types promoted to global scope (#55110). + if _, suffix := splitVargenSuffix(objName); suffix != "" { + return objPkg, objName + } + + if objPkg.Scope().Lookup(objName) == nil { + dict := pr.objDictIdx(idx) + + r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) + r.dict = dict + + declare := func(obj types.Object) { + objPkg.Scope().Insert(obj) + } + + switch tag { + default: + panic("weird") + + case pkgbits.ObjAlias: + pos := r.pos() + var tparams []*types.TypeParam + if r.Version().Has(pkgbits.AliasTypeParamNames) { + tparams = r.typeParamNames() + } + typ := r.typ() + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams)) + + case pkgbits.ObjConst: + pos := r.pos() + typ := r.typ() + val := r.Value() + declare(types.NewConst(pos, objPkg, objName, typ, val)) + + case pkgbits.ObjFunc: + pos := r.pos() + tparams := r.typeParamNames() + sig := r.signature(nil, nil, tparams) + declare(types.NewFunc(pos, objPkg, objName, sig)) + + case pkgbits.ObjType: + pos := r.pos() + + obj := types.NewTypeName(pos, objPkg, objName, nil) + named := types.NewNamed(obj, nil, nil) + declare(obj) + + named.SetTypeParams(r.typeParamNames()) + + setUnderlying := func(underlying types.Type) { + // If the underlying type is an interface, we need to + // duplicate its methods so we can replace the receiver + // parameter's type (#49906). + if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + methods := make([]*types.Func, iface.NumExplicitMethods()) + for i := range methods { + fn := iface.ExplicitMethod(i) + sig := fn.Type().(*types.Signature) + + recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) + typesinternal.SetVarKind(recv, typesinternal.RecvVar) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic())) + } + + embeds := make([]types.Type, iface.NumEmbeddeds()) + for i := range embeds { + embeds[i] = iface.EmbeddedType(i) + } + + newIface := types.NewInterfaceType(methods, embeds) + r.p.ifaces = append(r.p.ifaces, newIface) + underlying = newIface + } + + named.SetUnderlying(underlying) + } + + // Since go.dev/cl/455279, we can assume rhs.Underlying() will + // always be non-nil. However, to temporarily support users of + // older snapshot releases, we continue to fallback to the old + // behavior for now. 
+ // + // TODO(mdempsky): Remove fallback code and simplify after + // allowing time for snapshot users to upgrade. + rhs := r.typ() + if underlying := rhs.Underlying(); underlying != nil { + setUnderlying(underlying) + } else { + pk := r.p + pk.laterFor(named, func() { + // First be sure that the rhs is initialized, if it needs to be initialized. + delete(pk.laterFors, named) // prevent cycles + if i, ok := pk.laterFors[rhs]; ok { + f := pk.laterFns[i] + pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op + f() // initialize RHS + } + setUnderlying(rhs.Underlying()) + }) + } + + for i, n := 0, r.Len(); i < n; i++ { + named.AddMethod(r.method()) + } + + case pkgbits.ObjVar: + pos := r.pos() + typ := r.typ() + v := types.NewVar(pos, objPkg, objName, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + declare(v) + } + } + + return objPkg, objName +} + +func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { + + var dict readerDict + + { + r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) + if implicits := r.Len(); implicits != 0 { + errorf("unexpected object with %v implicit type parameter(s)", implicits) + } + + dict.bounds = make([]typeInfo, r.Len()) + for i := range dict.bounds { + dict.bounds[i] = r.typInfo() + } + + dict.derived = make([]derivedInfo, r.Len()) + dict.derivedTypes = make([]types.Type, len(dict.derived)) + for i := range dict.derived { + dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)} + if r.Version().Has(pkgbits.DerivedInfoNeeded) { + assert(!r.Bool()) + } + } + + pr.retireReader(r) + } + // function references follow, but reader doesn't need those + + return &dict +} + +func (r *reader) typeParamNames() []*types.TypeParam { + r.Sync(pkgbits.SyncTypeParamNames) + + // Note: This code assumes it only processes objects without + // implement type parameters. This is currently fine, because + // reader is only used to read in exported declarations, which are + // always package scoped. + + if len(r.dict.bounds) == 0 { + return nil + } + + // Careful: Type parameter lists may have cycles. To allow for this, + // we construct the type parameter list in two passes: first we + // create all the TypeNames and TypeParams, then we construct and + // set the bound type. + + r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds)) + for i := range r.dict.bounds { + pos := r.pos() + pkg, name := r.localIdent() + + tname := types.NewTypeName(pos, pkg, name, nil) + r.dict.tparams[i] = types.NewTypeParam(tname, nil) + } + + typs := make([]types.Type, len(r.dict.bounds)) + for i, bound := range r.dict.bounds { + typs[i] = r.p.typIdx(bound, r.dict) + } + + // TODO(mdempsky): This is subtle, elaborate further. + // + // We have to save tparams outside of the closure, because + // typeParamNames() can be called multiple times with the same + // dictionary instance. + // + // Also, this needs to happen later to make sure SetUnderlying has + // been called. + // + // TODO(mdempsky): Is it safe to have a single "later" slice or do + // we need to have multiple passes? See comments on CL 386002 and + // go.dev/issue/52104. 
+ tparams := r.dict.tparams + r.p.later(func() { + for i, typ := range typs { + tparams[i].SetConstraint(typ) + } + }) + + return r.dict.tparams +} + +func (r *reader) method() *types.Func { + r.Sync(pkgbits.SyncMethod) + pos := r.pos() + pkg, name := r.selector() + + rparams := r.typeParamNames() + sig := r.signature(r.param(), rparams, nil) + + _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. + return types.NewFunc(pos, pkg, name, sig) +} + +func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) } +func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) } +func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) } + +func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) { + r.Sync(marker) + return r.pkg(), r.String() +} + +// pkgScope returns pkg.Scope(). +// If pkg is nil, it returns types.Universe instead. +// +// TODO(mdempsky): Remove after x/tools can depend on Go 1.19. +func pkgScope(pkg *types.Package) *types.Scope { + if pkg != nil { + return pkg.Scope() + } + return types.Universe +} + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/openshift/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/openshift/vendor/golang.org/x/tools/internal/gocommand/invoke.go new file mode 100644 index 0000000000..58721202de --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -0,0 +1,567 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gocommand is a helper for calling the go command. +package gocommand + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// A Runner will run go command invocations and serialize +// them if it sees a concurrency error. +type Runner struct { + // once guards the runner initialization. + once sync.Once + + // inFlight tracks available workers. + inFlight chan struct{} + + // serialized guards the ability to run a go command serially, + // to avoid deadlocks when claiming workers. + serialized chan struct{} +} + +const maxInFlight = 10 + +func (runner *Runner) initialize() { + runner.once.Do(func() { + runner.inFlight = make(chan struct{}, maxInFlight) + runner.serialized = make(chan struct{}, 1) + }) +} + +// 1.13: go: updates to go.mod needed, but contents have changed +// 1.14: go: updating go.mod: existing contents have changed since last read +var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) + +// event keys for go command invocations +var ( + verb = keys.NewString("verb", "go command verb") + directory = keys.NewString("directory", "") +) + +func invLabels(inv Invocation) []label.Label { + return []label.Label{verb.Of(inv.Verb), directory.Of(inv.WorkingDir)} +} + +// Run is a convenience wrapper around RunRaw. 
+// It returns only stdout and a "friendly" error. +func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...) + defer done() + + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) + return stdout, friendly +} + +// RunPiped runs the invocation serially, always waiting for any concurrent +// invocations to complete first. +func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...) + defer done() + + _, err := runner.runPiped(ctx, inv, stdout, stderr) + return err +} + +// RunRaw runs the invocation, serializing requests only if they fight over +// go.mod changes. +// Postcondition: both error results have same nilness. +func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) + defer done() + // Make sure the runner is always initialized. + runner.initialize() + + // First, try to run the go command concurrently. + stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) + + // If we encounter a load concurrency error, we need to retry serially. + if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) { + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) + } + + return stdout, stderr, friendlyErr, err +} + +// Postcondition: both error results have same nilness. +func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Wait for 1 worker to become available. + select { + case <-ctx.Done(): + return nil, nil, ctx.Err(), ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() + } + + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +// Postcondition: both error results have same nilness. +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return ctx.Err(), ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. + for range maxInFlight { + select { + case <-ctx.Done(): + return ctx.Err(), ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() + } + } + + return inv.runWithFriendlyError(ctx, stdout, stderr) +} + +// An Invocation represents a call to the go command. +type Invocation struct { + Verb string + Args []string + BuildFlags []string + + // If ModFlag is set, the go command is invoked with -mod=ModFlag. + // TODO(rfindley): remove, in favor of Args. + ModFlag string + + // If ModFile is set, the go command is invoked with -modfile=ModFile. + // TODO(rfindley): remove, in favor of Args. 
+ ModFile string + + // Overlay is the name of the JSON overlay file that describes + // unsaved editor buffers; see [WriteOverlays]. + // If set, the go command is invoked with -overlay=Overlay. + // TODO(rfindley): remove, in favor of Args. + Overlay string + + // If CleanEnv is set, the invocation will run only with the environment + // in Env, not starting with os.Environ. + CleanEnv bool + Env []string + WorkingDir string + Logf func(format string, args ...any) +} + +// Postcondition: both error results have same nilness. +func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { + rawError = i.run(ctx, stdout, stderr) + if rawError != nil { + friendlyError = rawError + // Check for 'go' executable not being found. + if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + friendlyError = fmt.Errorf("go command required, not found: %v", ee) + } + if ctx.Err() != nil { + friendlyError = ctx.Err() + } + friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr) + } + return +} + +// logf logs if i.Logf is non-nil. +func (i *Invocation) logf(format string, args ...any) { + if i.Logf != nil { + i.Logf(format, args...) + } +} + +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { + goArgs := []string{i.Verb} + + appendModFile := func() { + if i.ModFile != "" { + goArgs = append(goArgs, "-modfile="+i.ModFile) + } + } + appendModFlag := func() { + if i.ModFlag != "" { + goArgs = append(goArgs, "-mod="+i.ModFlag) + } + } + appendOverlayFlag := func() { + if i.Overlay != "" { + goArgs = append(goArgs, "-overlay="+i.Overlay) + } + } + + switch i.Verb { + case "env", "version": + goArgs = append(goArgs, i.Args...) + case "mod": + // mod needs the sub-verb before flags. + goArgs = append(goArgs, i.Args[0]) + appendModFile() + goArgs = append(goArgs, i.Args[1:]...) + case "get": + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + goArgs = append(goArgs, i.Args...) + + default: // notably list and build. + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + appendModFlag() + appendOverlayFlag() + goArgs = append(goArgs, i.Args...) + } + cmd := exec.Command("go", goArgs...) + cmd.Stdout = stdout + cmd.Stderr = stderr + + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + cmd.WaitDelay = 30 * time.Second + + // The cwd gets resolved to the real path. On Darwin, where + // /tmp is a symlink, this breaks anything that expects the + // working directory to keep the original path, including the + // go command when dealing with modules. + // + // os.Getwd has a special feature where if the cwd and the PWD + // are the same node then it trusts the PWD, so by setting it + // in the env for the child process we fix up all the paths + // returned by the go command. + if !i.CleanEnv { + cmd.Env = os.Environ() + } + cmd.Env = append(cmd.Env, i.Env...) 
+ if i.WorkingDir != "" { + cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) + cmd.Dir = i.WorkingDir + } + + debugStr := cmdDebugStr(cmd) + i.logf("starting %v", debugStr) + start := time.Now() + defer func() { + i.logf("%s for %v", time.Since(start), debugStr) + }() + + return runCmdContext(ctx, cmd) +} + +// DebugHangingGoCommands may be set by tests to enable additional +// instrumentation (including panics) for debugging hanging Go commands. +// +// See golang/go#54461 for details. +var DebugHangingGoCommands = false + +// runCmdContext is like exec.CommandContext except it sends os.Interrupt +// before os.Kill. +func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { + // If cmd.Stdout is not an *os.File, the exec package will create a pipe and + // copy it to the Writer in a goroutine until the process has finished and + // either the pipe reaches EOF or command's WaitDelay expires. + // + // However, the output from 'go list' can be quite large, and we don't want to + // keep reading (and allocating buffers) if we've already decided we don't + // care about the output. We don't want to wait for the process to finish, and + // we don't wait to wait for the WaitDelay to expire either. + // + // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace + // it with a pipe (which is an *os.File), which we can close in order to stop + // copying output as soon as we realize we don't care about it. + var stdoutW *os.File + if cmd.Stdout != nil { + if _, ok := cmd.Stdout.(*os.File); !ok { + var stdoutR *os.File + stdoutR, stdoutW, err = os.Pipe() + if err != nil { + return err + } + prevStdout := cmd.Stdout + cmd.Stdout = stdoutW + + stdoutErr := make(chan error, 1) + go func() { + _, err := io.Copy(prevStdout, stdoutR) + if err != nil { + err = fmt.Errorf("copying stdout: %w", err) + } + stdoutErr <- err + }() + defer func() { + // We started a goroutine to copy a stdout pipe. + // Wait for it to finish, or terminate it if need be. + var err2 error + select { + case err2 = <-stdoutErr: + stdoutR.Close() + case <-ctx.Done(): + stdoutR.Close() + // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close + // should cause the Read call in io.Copy to unblock and return + // immediately, but we still need to receive from stdoutErr to confirm + // that it has happened. + <-stdoutErr + err2 = ctx.Err() + } + if err == nil { + err = err2 + } + }() + + // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the + // same writer, and have a type that can be compared with ==, at most + // one goroutine at a time will call Write.” + // + // Since we're starting a goroutine that writes to cmd.Stdout, we must + // also update cmd.Stderr so that it still holds. + func() { + defer func() { recover() }() + if cmd.Stderr == prevStdout { + cmd.Stderr = cmd.Stdout + } + }() + } + } + + startTime := time.Now() + err = cmd.Start() + if stdoutW != nil { + // The child process has inherited the pipe file, + // so close the copy held in this process. + stdoutW.Close() + stdoutW = nil + } + if err != nil { + return err + } + + resChan := make(chan error, 1) + go func() { + resChan <- cmd.Wait() + }() + + // If we're interested in debugging hanging Go commands, stop waiting after a + // minute and panic with interesting information. + debug := DebugHangingGoCommands + if debug { + timer := time.NewTimer(1 * time.Minute) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + // HandleHangingGoCommand terminates this process. 
+ // Pass off resChan in case we can collect the command error. + handleHangingGoCommand(startTime, cmd, resChan) + case <-ctx.Done(): + } + } else { + select { + case err := <-resChan: + return err + case <-ctx.Done(): + } + } + + // Cancelled. Interrupt and see if it ends voluntarily. + if err := cmd.Process.Signal(os.Interrupt); err == nil { + // (We used to wait only 1s but this proved + // fragile on loaded builder machines.) + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + } + } + + // Didn't shut down in response to interrupt. Kill it hard. + if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { + log.Printf("error killing the Go command: %v", err) + } + + return <-resChan +} + +// handleHangingGoCommand outputs debugging information to help diagnose the +// cause of a hanging Go command, and then exits with log.Fatalf. +func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) { + switch runtime.GOOS { + case "linux", "darwin", "freebsd", "netbsd", "openbsd": + fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND + + The gopls test runner has detected a hanging go command. In order to debug + this, the output of ps and lsof/fstat is printed below. + + See golang/go#54461 for more details.`) + + fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") + fmt.Fprintln(os.Stderr, "-------------------------") + psCmd := exec.Command("ps", "axo", "ppid,pid,command") + psCmd.Stdout = os.Stderr + psCmd.Stderr = os.Stderr + if err := psCmd.Run(); err != nil { + log.Printf("Handling hanging Go command: running ps: %v", err) + } + + listFiles := "lsof" + if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" { + listFiles = "fstat" + } + + fmt.Fprintln(os.Stderr, "\n"+listFiles+":") + fmt.Fprintln(os.Stderr, "-----") + listFilesCmd := exec.Command(listFiles) + listFilesCmd.Stdout = os.Stderr + listFilesCmd.Stderr = os.Stderr + if err := listFilesCmd.Run(); err != nil { + log.Printf("Handling hanging Go command: running %s: %v", listFiles, err) + } + // Try to extract information about the slow go process by issuing a SIGQUIT. 
+ if err := cmd.Process.Signal(sigStuckProcess); err == nil { + select { + case err := <-resChan: + stderr := "not a bytes.Buffer" + if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil { + stderr = buf.String() + } + log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr) + case <-time.After(5 * time.Second): + } + } else { + log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err) + } + } + log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid) +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + if len(split) == 2 { + k, v := split[0], split[1] + env[k] = v + } + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} + +// WriteOverlays writes each value in the overlay (see the Overlay +// field of go/packages.Config) to a temporary file and returns the name +// of a JSON file describing the mapping that is suitable for the "go +// list -overlay" flag. +// +// On success, the caller must call the cleanup function exactly once +// when the files are no longer needed. +func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. + if len(overlay) == 0 { + return "", func() {}, nil + } + + dir, err := os.MkdirTemp("", "gocommand-*") + if err != nil { + return "", nil, err + } + + // The caller must clean up this directory, + // unless this function returns an error. + // (The cleanup operand of each return + // statement below is ignored.) + defer func() { + cleanup = func() { + os.RemoveAll(dir) + } + if err != nil { + cleanup() + cleanup = nil + } + }() + + // Write each map entry to a temporary file. + overlays := make(map[string]string) + for k, v := range overlay { + // Use a unique basename for each file (001-foo.go), + // to avoid creating nested directories. + base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k)) + filename := filepath.Join(dir, base) + err := os.WriteFile(filename, v, 0666) + if err != nil { + return "", nil, err + } + overlays[k] = filename + } + + // Write the JSON overlay file that maps logical file names to temp files. + // + // OverlayJSON is the format overlay files are expected to be in. + // The Replace map maps from overlaid paths to replacement paths: + // the Go command will forward all reads trying to open + // each overlaid path to its replacement path, or consider the overlaid + // path not to exist if the replacement path is empty. + // + // From golang/go#39958. 
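To make the overlay format concrete, here is a minimal runnable sketch (separate from this patch; the paths are made up) of the JSON such a file contains:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Mirrors the OverlayJSON shape defined below: a single "replace"
	// map from overlaid paths to replacement paths.
	type overlayJSON struct {
		Replace map[string]string `json:"replace,omitempty"`
	}
	b, _ := json.Marshal(overlayJSON{Replace: map[string]string{
		"/work/pkg/a.go": "/tmp/gocommand-123/1-a.go", // hypothetical paths
	}})
	fmt.Println(string(b))
	// {"replace":{"/work/pkg/a.go":"/tmp/gocommand-123/1-a.go"}}
}
```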
+	type OverlayJSON struct {
+		Replace map[string]string `json:"replace,omitempty"`
+	}
+	b, err := json.Marshal(OverlayJSON{Replace: overlays})
+	if err != nil {
+		return "", nil, err
+	}
+	filename = filepath.Join(dir, "overlay.json")
+	if err := os.WriteFile(filename, b, 0666); err != nil {
+		return "", nil, err
+	}
+
+	return filename, nil, nil
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/openshift/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
new file mode 100644
index 0000000000..469c648e4d
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix
+
+package gocommand
+
+import "os"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill.
+var sigStuckProcess = os.Kill
diff --git a/openshift/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/openshift/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
new file mode 100644
index 0000000000..169d37c8e9
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package gocommand
+
+import "syscall"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// Send SIGQUIT to get a stack trace.
+var sigStuckProcess = syscall.SIGQUIT
diff --git a/openshift/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/openshift/vendor/golang.org/x/tools/internal/gocommand/vendor.go
new file mode 100644
index 0000000000..e38d1fb488
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/gocommand/vendor.go
@@ -0,0 +1,163 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocommand
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"time"
+
+	"golang.org/x/mod/semver"
+)
+
+// ModuleJSON holds information about a module.
+type ModuleJSON struct {
+	Path      string      // module path
+	Version   string      // module version
+	Versions  []string    // available module versions (with -versions)
+	Replace   *ModuleJSON // replaced by this module
+	Time      *time.Time  // time version was created
+	Update    *ModuleJSON // available update, if any (with -u)
+	Main      bool        // is this the main module?
+	Indirect  bool        // is this module only an indirect dependency of main module?
+	Dir       string      // directory holding files for this module, if any
+	GoMod     string      // path to go.mod file used when loading this module, if any
+	GoVersion string      // go version used in module
+}
+
+var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
+
+// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands
+// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields,
+// of which only Verb and Args are modified to run the appropriate Go command.
+// Inspired by setDefaultBuildMod in modload/init.go +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) { + mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) + if err != nil { + return false, nil, err + } + + // We check the GOFLAGS to see if there is anything overridden or not. + inv.Verb = "env" + inv.Args = []string{"GOFLAGS"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goflags := string(bytes.TrimSpace(stdout.Bytes())) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + // Don't override an explicit '-mod=' argument. + if modFlag == "vendor" { + return true, mainMod, nil + } else if modFlag != "" { + return false, nil, nil + } + if mainMod == nil || !go114 { + return false, nil, nil + } + // Check 1.14's automatic vendor mode. + if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. + return true, mainMod, nil + } + } + return false, nil, nil +} + +// getMainModuleAnd114 gets one of the main modules' information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + + lines := strings.Split(stdout.String(), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mod := &ModuleJSON{ + Path: lines[0], + Dir: lines[1], + GoMod: lines[2], + GoVersion: lines[3], + Main: true, + } + return mod, lines[4] == "go1.14", nil +} + +// WorkspaceVendorEnabled reports whether workspace vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func WorkspaceVendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, []*ModuleJSON, error) { + inv.Verb = "env" + inv.Args = []string{"GOWORK"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goWork := string(bytes.TrimSpace(stdout.Bytes())) + if fi, err := os.Stat(filepath.Join(filepath.Dir(goWork), "vendor")); err == nil && fi.IsDir() { + mainMods, err := getWorkspaceMainModules(ctx, inv, r) + if err != nil { + return false, nil, err + } + return true, mainMods, nil + } + return false, nil, nil +} + +// getWorkspaceMainModules gets the main modules' information. +// This is the information needed to figure out if vendoring should be enabled. 
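For reference, the GOFLAGS check above can be exercised in isolation; a small runnable sketch (separate from this patch, with invented flag strings) of how modFlagRegexp extracts a -mod value:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as modFlagRegexp above: it pulls the value out of a
// "-mod=..." or "-mod ..." flag embedded in $GOFLAGS.
var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)

func main() {
	for _, goflags := range []string{"-mod=vendor -trimpath", "-count=1", ""} {
		var modFlag string
		if m := modFlagRegexp.FindStringSubmatch(goflags); m != nil {
			modFlag = m[1]
		}
		fmt.Printf("%-24q -> %q\n", goflags, modFlag)
	}
}
```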
+func getWorkspaceMainModules(ctx context.Context, inv Invocation, r *Runner) ([]*ModuleJSON, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, err + } + + lines := strings.Split(strings.TrimSuffix(stdout.String(), "\n"), "\n") + if len(lines) < 4 { + return nil, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mods := make([]*ModuleJSON, 0, len(lines)/4) + for i := 0; i < len(lines); i += 4 { + mods = append(mods, &ModuleJSON{ + Path: lines[i], + Dir: lines[i+1], + GoMod: lines[i+2], + GoVersion: lines[i+3], + Main: true, + }) + } + return mods, nil +} diff --git a/openshift/vendor/golang.org/x/tools/internal/gocommand/version.go b/openshift/vendor/golang.org/x/tools/internal/gocommand/version.go new file mode 100644 index 0000000000..446c5846a6 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -0,0 +1,71 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "context" + "fmt" + "regexp" + "strings" +) + +// GoVersion reports the minor version number of the highest release +// tag built into the go command on the PATH. +// +// Note that this may be higher than the version of the go tool used +// to build this application, and thus the versions of the standard +// go/{scanner,parser,ast,types} packages that are linked into it. +// In that case, callers should either downgrade to the version of +// go used to build the application, or report an error that the +// application is too old to use the go command on the PATH. +func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { + inv.Verb = "list" + inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} + inv.BuildFlags = nil // This is not a build command. + inv.ModFlag = "" + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + + stdoutBytes, err := r.Run(ctx, inv) + if err != nil { + return 0, err + } + stdout := stdoutBytes.String() + if len(stdout) < 3 { + return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) + } + // Split up "[go1.1 go1.15]" and return highest go1.X value. + tags := strings.Fields(stdout[1 : len(stdout)-2]) + for i := len(tags) - 1; i >= 0; i-- { + var version int + if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil { + continue + } + return version, nil + } + return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) +} + +// GoVersionOutput returns the complete output of the go version command. +func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) { + inv.Verb = "version" + goVersion, err := r.Run(ctx, inv) + if err != nil { + return "", err + } + return goVersion.String(), nil +} + +// ParseGoVersionOutput extracts the Go version string +// from the output of the "go version" command. +// Given an unrecognized form, it returns an empty string. 
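To illustrate the ReleaseTags scan in GoVersion above, a standalone runnable sketch (not part of this patch; the tag list is a made-up example of `go list` output) placed before the parser below:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// GoVersion asks `go list` for context.ReleaseTags and scans the
	// list backwards for the highest go1.X tag, as in the vendored code.
	stdout := "[go1.1 go1.2 go1.24]\n"
	tags := strings.Fields(stdout[1 : len(stdout)-2]) // strip "[", "]\n"
	for i := len(tags) - 1; i >= 0; i-- {
		var version int
		if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil {
			continue
		}
		fmt.Println(version) // 24
		return
	}
}
```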
+func ParseGoVersionOutput(data string) string { + re := regexp.MustCompile(`^go version (go\S+|devel \S+)`) + m := re.FindStringSubmatch(data) + if len(m) != 2 { + return "" // unrecognized version + } + return m[1] +} diff --git a/openshift/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/openshift/vendor/golang.org/x/tools/internal/packagesinternal/packages.go new file mode 100644 index 0000000000..929b470beb --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -0,0 +1,23 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesinternal exposes internal-only fields from go/packages. +package packagesinternal + +import "fmt" + +var GetDepsErrors = func(p any) []*PackageError { return nil } + +type PackageError struct { + ImportStack []string // shortest path from package named on command line to this one + Pos string // position of error (if present, file:line:col) + Err string // the error itself +} + +func (err PackageError) String() string { + return fmt.Sprintf("%s: %s (import stack: %s)", err.Pos, err.Err, err.ImportStack) +} + +var TypecheckCgo int +var DepsErrors int // must be set as a LoadMode to call GetDepsErrors diff --git a/openshift/vendor/golang.org/x/tools/internal/pkgbits/codes.go b/openshift/vendor/golang.org/x/tools/internal/pkgbits/codes.go new file mode 100644 index 0000000000..f0cabde96e --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/pkgbits/codes.go @@ -0,0 +1,77 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A Code is an enum value that can be encoded into bitstreams. +// +// Code types are preferable for enum types, because they allow +// Decoder to detect desyncs. +type Code interface { + // Marker returns the SyncMarker for the Code's dynamic type. + Marker() SyncMarker + + // Value returns the Code's ordinal value. + Value() int +} + +// A CodeVal distinguishes among go/constant.Value encodings. +type CodeVal int + +func (c CodeVal) Marker() SyncMarker { return SyncVal } +func (c CodeVal) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ValBool CodeVal = iota + ValString + ValInt64 + ValBigInt + ValBigRat + ValBigFloat +) + +// A CodeType distinguishes among go/types.Type encodings. +type CodeType int + +func (c CodeType) Marker() SyncMarker { return SyncType } +func (c CodeType) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + TypeBasic CodeType = iota + TypeNamed + TypePointer + TypeSlice + TypeArray + TypeChan + TypeMap + TypeSignature + TypeStruct + TypeInterface + TypeUnion + TypeTypeParam +) + +// A CodeObj distinguishes among go/types.Object encodings. +type CodeObj int + +func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } +func (c CodeObj) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. 
+ +const ( + ObjAlias CodeObj = iota + ObjConst + ObjType + ObjFunc + ObjVar + ObjStub +) diff --git a/openshift/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/openshift/vendor/golang.org/x/tools/internal/pkgbits/decoder.go new file mode 100644 index 0000000000..c0aba26c48 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -0,0 +1,519 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "encoding/binary" + "errors" + "fmt" + "go/constant" + "go/token" + "io" + "math/big" + "os" + "runtime" + "strings" +) + +// A PkgDecoder provides methods for decoding a package's Unified IR +// export data. +type PkgDecoder struct { + // version is the file format version. + version Version + + // sync indicates whether the file uses sync markers. + sync bool + + // pkgPath is the package path for the package to be decoded. + // + // TODO(mdempsky): Remove; unneeded since CL 391014. + pkgPath string + + // elemData is the full data payload of the encoded package. + // Elements are densely and contiguously packed together. + // + // The last 8 bytes of elemData are the package fingerprint. + elemData string + + // elemEnds stores the byte-offset end positions of element + // bitstreams within elemData. + // + // For example, element I's bitstream data starts at elemEnds[I-1] + // (or 0, if I==0) and ends at elemEnds[I]. + // + // Note: elemEnds is indexed by absolute indices, not + // section-relative indices. + elemEnds []uint32 + + // elemEndsEnds stores the index-offset end positions of relocation + // sections within elemEnds. + // + // For example, section K's end positions start at elemEndsEnds[K-1] + // (or 0, if K==0) and end at elemEndsEnds[K]. + elemEndsEnds [numRelocs]uint32 + + scratchRelocEnt []RelocEnt +} + +// PkgPath returns the package path for the package +// +// TODO(mdempsky): Remove; unneeded since CL 391014. +func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } + +// SyncMarkers reports whether pr uses sync markers. +func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + +// NewPkgDecoder returns a PkgDecoder initialized to read the Unified +// IR export data from input. pkgPath is the package path for the +// compilation unit that produced the export data. +func NewPkgDecoder(pkgPath, input string) PkgDecoder { + pr := PkgDecoder{ + pkgPath: pkgPath, + } + + // TODO(mdempsky): Implement direct indexing of input string to + // avoid copying the position information. 
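A runnable toy (not part of this patch; payload and offsets invented) showing the end-offset lookup that the elemEnds comments above describe and that DataIdx performs:

```go
package main

import "fmt"

func main() {
	// The decoder stores only end offsets; element i's payload is
	// elemData[elemEnds[i-1]:elemEnds[i]], with an implied 0 for i == 0.
	elemData := "aaabbcccc" // toy payload, elements packed contiguously
	elemEnds := []uint32{3, 5, 9}
	for i := range elemEnds {
		var start uint32
		if i > 0 {
			start = elemEnds[i-1]
		}
		fmt.Printf("elem %d: %q\n", i, elemData[start:elemEnds[i]])
	}
	// elem 0: "aaa", elem 1: "bb", elem 2: "cccc"
}
```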
+ + r := strings.NewReader(input) + + var ver uint32 + assert(binary.Read(r, binary.LittleEndian, &ver) == nil) + pr.version = Version(ver) + + if pr.version >= numVersions { + panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1)) + } + + if pr.version.Has(Flags) { + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } + + assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) + + pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) + assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) + + pos, err := r.Seek(0, io.SeekCurrent) + assert(err == nil) + + pr.elemData = input[pos:] + + const fingerprintSize = 8 + assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1])) + + return pr +} + +// NumElems returns the number of elements in section k. +func (pr *PkgDecoder) NumElems(k RelocKind) int { + count := int(pr.elemEndsEnds[k]) + if k > 0 { + count -= int(pr.elemEndsEnds[k-1]) + } + return count +} + +// TotalElems returns the total number of elements across all sections. +func (pr *PkgDecoder) TotalElems() int { + return len(pr.elemEnds) +} + +// Fingerprint returns the package fingerprint. +func (pr *PkgDecoder) Fingerprint() [8]byte { + var fp [8]byte + copy(fp[:], pr.elemData[len(pr.elemData)-8:]) + return fp +} + +// AbsIdx returns the absolute index for the given (section, index) +// pair. +func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { + absIdx := int(idx) + if k > 0 { + absIdx += int(pr.elemEndsEnds[k-1]) + } + if absIdx >= int(pr.elemEndsEnds[k]) { + panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + } + return absIdx +} + +// DataIdx returns the raw element bitstream for the given (section, +// index) pair. +func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { + absIdx := pr.AbsIdx(k, idx) + + var start uint32 + if absIdx > 0 { + start = pr.elemEnds[absIdx-1] + } + end := pr.elemEnds[absIdx] + + return pr.elemData[start:end] +} + +// StringIdx returns the string value for the given string index. +func (pr *PkgDecoder) StringIdx(idx Index) string { + return pr.DataIdx(RelocString, idx) +} + +// NewDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.NewDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +// TempDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +// If possible the Decoder should be RetireDecoder'd when it is no longer +// needed, this will avoid heap allocations. +func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.TempDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +func (pr *PkgDecoder) RetireDecoder(d *Decoder) { + pr.scratchRelocEnt = d.Relocs + d.Relocs = nil +} + +// NewDecoderRaw returns a Decoder for the given (section, index) pair. +// +// Most callers should use NewDecoder instead. 
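The RetireDecoder comment above describes a borrow-and-return scratch buffer; a runnable toy mirror (not part of this patch; the pool/borrow/retire names are invented) of that reuse pattern:

```go
package main

import "fmt"

// A toy mirror of TempDecoderRaw/RetireDecoder: callers borrow one
// scratch slice and hand it back, so repeated short-lived decoders
// avoid reallocating their Relocs slice.
type pool struct{ scratch []int }

func (p *pool) borrow(n int) []int {
	if cap(p.scratch) >= n {
		s := p.scratch[:n]
		p.scratch = nil
		return s
	}
	return make([]int, n)
}

func (p *pool) retire(s []int) { p.scratch = s }

func main() {
	var p pool
	s := p.borrow(4)
	fmt.Println(len(s), cap(s)) // 4 4 (freshly allocated)
	p.retire(s)
	t := p.borrow(2)
	fmt.Println(len(t), cap(t)) // 2 4 (reused without allocating)
}
```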
+func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
+	r := Decoder{
+		common: pr,
+		k:      k,
+		Idx:    idx,
+	}
+
+	r.Data.Reset(pr.DataIdx(k, idx))
+	r.Sync(SyncRelocs)
+	r.Relocs = make([]RelocEnt, r.Len())
+	for i := range r.Relocs {
+		r.Sync(SyncReloc)
+		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+	}
+
+	return r
+}
+
+func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
+	r := Decoder{
+		common: pr,
+		k:      k,
+		Idx:    idx,
+	}
+
+	r.Data.Reset(pr.DataIdx(k, idx))
+	r.Sync(SyncRelocs)
+	l := r.Len()
+	if cap(pr.scratchRelocEnt) >= l {
+		r.Relocs = pr.scratchRelocEnt[:l]
+		pr.scratchRelocEnt = nil
+	} else {
+		r.Relocs = make([]RelocEnt, l)
+	}
+	for i := range r.Relocs {
+		r.Sync(SyncReloc)
+		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+	}
+
+	return r
+}
+
+// A Decoder provides methods for decoding an individual element's
+// bitstream data.
+type Decoder struct {
+	common *PkgDecoder
+
+	Relocs []RelocEnt
+	Data   strings.Reader
+
+	k   RelocKind
+	Idx Index
+}
+
+func (r *Decoder) checkErr(err error) {
+	if err != nil {
+		panicf("unexpected decoding error: %w", err)
+	}
+}
+
+func (r *Decoder) rawUvarint() uint64 {
+	x, err := readUvarint(&r.Data)
+	r.checkErr(err)
+	return x
+}
+
+// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
+// This avoids the interface conversion and thus has better escape properties,
+// which flows up the stack.
+func readUvarint(r *strings.Reader) (uint64, error) {
+	var x uint64
+	var s uint
+	for i := range binary.MaxVarintLen64 {
+		b, err := r.ReadByte()
+		if err != nil {
+			if i > 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			return x, err
+		}
+		if b < 0x80 {
+			if i == binary.MaxVarintLen64-1 && b > 1 {
+				return x, overflow
+			}
+			return x | uint64(b)<<s, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+	return x, overflow
+}
+
+var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
+
+func (r *Decoder) rawVarint() int64 {
+	ux := r.rawUvarint()
+
+	// Zig-zag decode.
+	x := int64(ux >> 1)
+	if ux&1 != 0 {
+		x = ^x
+	}
+	return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+	e := r.Relocs[idx]
+	assert(e.Kind == k)
+	return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If r.common.sync is false, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+	if !r.common.sync {
+		return
+	}
+
+	pos, _ := r.Data.Seek(0, io.SeekCurrent)
+	mHave := SyncMarker(r.rawUvarint())
+	writerPCs := make([]int, r.rawUvarint())
+	for i := range writerPCs {
+		writerPCs[i] = int(r.rawUvarint())
+	}
+
+	if mHave == mWant {
+		return
+	}
+
+	// There's some tension here between printing:
+	//
+	// (1) full file paths that tools can recognize (e.g., so emacs
+	// hyperlinks the "file:line" text for easy navigation), or
+	//
+	// (2) short file paths that are easier for humans to read (e.g., by
+	// omitting redundant or irrelevant details, so it's easier to
+	// focus on the useful bits that remain).
+	//
+	// The current formatting favors the former, as it seems more
+	// helpful in practice. But perhaps the formatting could be improved
+	// to better address both concerns. For example, use relative file
+	// paths if they would be shorter, or rewrite file paths to contain
+	// "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+	// to reliably expand that again.
+ + fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) + + fmt.Printf("\nfound %v, written at:\n", mHave) + if len(writerPCs) == 0 { + fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) + } + for _, pc := range writerPCs { + fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) + } + + fmt.Printf("\nexpected %v, reading at:\n", mWant) + var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? + n := runtime.Callers(2, readerPCs[:]) + for _, pc := range fmtFrames(readerPCs[:n]...) { + fmt.Printf("\t%s\n", pc) + } + + // We already printed a stack trace for the reader, so now we can + // simply exit. Printing a second one with panic or base.Fatalf + // would just be noise. + os.Exit(1) +} + +// Bool decodes and returns a bool value from the element bitstream. +func (r *Decoder) Bool() bool { + r.Sync(SyncBool) + x, err := r.Data.ReadByte() + r.checkErr(err) + assert(x < 2) + return x != 0 +} + +// Int64 decodes and returns an int64 value from the element bitstream. +func (r *Decoder) Int64() int64 { + r.Sync(SyncInt64) + return r.rawVarint() +} + +// Uint64 decodes and returns a uint64 value from the element bitstream. +func (r *Decoder) Uint64() uint64 { + r.Sync(SyncUint64) + return r.rawUvarint() +} + +// Len decodes and returns a non-negative int value from the element bitstream. +func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } + +// Int decodes and returns an int value from the element bitstream. +func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } + +// Uint decodes and returns a uint value from the element bitstream. +func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } + +// Code decodes a Code value from the element bitstream and returns +// its ordinal value. It's the caller's responsibility to convert the +// result to an appropriate Code type. +// +// TODO(mdempsky): Ideally this method would have signature "Code[T +// Code] T" instead, but we don't allow generic methods and the +// compiler can't depend on generics yet anyway. +func (r *Decoder) Code(mark SyncMarker) int { + r.Sync(mark) + return r.Len() +} + +// Reloc decodes a relocation of expected section k from the element +// bitstream and returns an index to the referenced element. +func (r *Decoder) Reloc(k RelocKind) Index { + r.Sync(SyncUseReloc) + return r.rawReloc(k, r.Len()) +} + +// String decodes and returns a string value from the element +// bitstream. +func (r *Decoder) String() string { + r.Sync(SyncString) + return r.common.StringIdx(r.Reloc(RelocString)) +} + +// Strings decodes and returns a variable-length slice of strings from +// the element bitstream. +func (r *Decoder) Strings() []string { + res := make([]string, r.Len()) + for i := range res { + res[i] = r.String() + } + return res +} + +// Value decodes and returns a constant.Value from the element +// bitstream. 
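The rawVarint methods on either side of the stream rely on zig-zag coding; a runnable sketch (not part of this patch) of the matching encode and decode steps:

```go
package main

import "fmt"

func main() {
	// Zig-zag coding as used by the encoder's and decoder's rawVarint:
	// signed values fold into unsigned ones so small magnitudes stay
	// small on the wire (0,-1,1,-2,... -> 0,1,2,3,...).
	for _, x := range []int64{0, -1, 1, -2, 64, -64} {
		ux := uint64(x) << 1 // encode
		if x < 0 {
			ux = ^ux
		}
		y := int64(ux >> 1) // decode
		if ux&1 != 0 {
			y = ^y
		}
		fmt.Printf("x=%3d  ux=%3d  decoded=%3d\n", x, ux, y)
	}
}
```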
+func (r *Decoder) Value() constant.Value { + r.Sync(SyncValue) + isComplex := r.Bool() + val := r.scalar() + if isComplex { + val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) + } + return val +} + +func (r *Decoder) scalar() constant.Value { + switch tag := CodeVal(r.Code(SyncVal)); tag { + default: + panic(fmt.Errorf("unexpected scalar tag: %v", tag)) + + case ValBool: + return constant.MakeBool(r.Bool()) + case ValString: + return constant.MakeString(r.String()) + case ValInt64: + return constant.MakeInt64(r.Int64()) + case ValBigInt: + return constant.Make(r.bigInt()) + case ValBigRat: + num := r.bigInt() + denom := r.bigInt() + return constant.Make(new(big.Rat).SetFrac(num, denom)) + case ValBigFloat: + return constant.Make(r.bigFloat()) + } +} + +func (r *Decoder) bigInt() *big.Int { + v := new(big.Int).SetBytes([]byte(r.String())) + if r.Bool() { + v.Neg(v) + } + return v +} + +func (r *Decoder) bigFloat() *big.Float { + v := new(big.Float).SetPrec(512) + assert(v.UnmarshalText([]byte(r.String())) == nil) + return v +} + +// @@@ Helpers + +// TODO(mdempsky): These should probably be removed. I think they're a +// smell that the export data format is not yet quite right. + +// PeekPkgPath returns the package path for the specified package +// index. +func (pr *PkgDecoder) PeekPkgPath(idx Index) string { + var path string + { + r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef) + path = r.String() + pr.RetireDecoder(&r) + } + if path == "" { + path = pr.pkgPath + } + return path +} + +// PeekObj returns the package path, object name, and CodeObj for the +// specified object index. +func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { + var ridx Index + var name string + var rcode int + { + r := pr.TempDecoder(RelocName, idx, SyncObject1) + r.Sync(SyncSym) + r.Sync(SyncPkg) + ridx = r.Reloc(RelocPkg) + name = r.String() + rcode = r.Code(SyncCodeObj) + pr.RetireDecoder(&r) + } + + path := pr.PeekPkgPath(ridx) + assert(name != "") + + tag := CodeObj(rcode) + + return path, name, tag +} + +// Version reports the version of the bitstream. +func (w *Decoder) Version() Version { return w.common.version } diff --git a/openshift/vendor/golang.org/x/tools/internal/pkgbits/doc.go b/openshift/vendor/golang.org/x/tools/internal/pkgbits/doc.go new file mode 100644 index 0000000000..c8a2796b5e --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/pkgbits/doc.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkgbits implements low-level coding abstractions for +// Unified IR's export data format. +// +// At a low-level, a package is a collection of bitstream elements. +// Each element has a "kind" and a dense, non-negative index. +// Elements can be randomly accessed given their kind and index. +// +// Individual elements are sequences of variable-length values (e.g., +// integers, booleans, strings, go/constant values, cross-references +// to other elements). Package pkgbits provides APIs for encoding and +// decoding these low-level values, but the details of mapping +// higher-level Go constructs into elements is left to higher-level +// abstractions. +// +// Elements may cross-reference each other with "relocations." For +// example, an element representing a pointer type has a relocation +// referring to the element type. +// +// Go constructs may be composed as a constellation of multiple +// elements. 
For example, a declared function may have one element to +// describe the object (e.g., its name, type, position), and a +// separate element to describe its function body. This allows readers +// some flexibility in efficiently seeking or re-reading data (e.g., +// inlining requires re-reading the function body for each inlined +// call, without needing to re-read the object-level details). +// +// This is a copy of internal/pkgbits in the Go implementation. +package pkgbits diff --git a/openshift/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/openshift/vendor/golang.org/x/tools/internal/pkgbits/encoder.go new file mode 100644 index 0000000000..c17a12399d --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -0,0 +1,392 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "bytes" + "crypto/md5" + "encoding/binary" + "go/constant" + "io" + "math/big" + "runtime" + "strings" +) + +// A PkgEncoder provides methods for encoding a package's Unified IR +// export data. +type PkgEncoder struct { + // version of the bitstream. + version Version + + // elems holds the bitstream for previously encoded elements. + elems [numRelocs][]string + + // stringsIdx maps previously encoded strings to their index within + // the RelocString section, to allow deduplication. That is, + // elems[RelocString][stringsIdx[s]] == s (if present). + stringsIdx map[string]Index + + // syncFrames is the number of frames to write at each sync + // marker. A negative value means sync markers are omitted. + syncFrames int +} + +// SyncMarkers reports whether pw uses sync markers. +func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } + +// NewPkgEncoder returns an initialized PkgEncoder. +// +// syncFrames is the number of caller frames that should be serialized +// at Sync points. Serializing additional frames results in larger +// export data files, but can help diagnosing desync errors in +// higher-level Unified IR reader/writer code. If syncFrames is +// negative, then sync markers are omitted entirely. +func NewPkgEncoder(version Version, syncFrames int) PkgEncoder { + return PkgEncoder{ + version: version, + stringsIdx: make(map[string]Index), + syncFrames: syncFrames, + } +} + +// DumpTo writes the package's encoded data to out0 and returns the +// package fingerprint. +func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { + h := md5.New() + out := io.MultiWriter(out0, h) + + writeUint32 := func(x uint32) { + assert(binary.Write(out, binary.LittleEndian, x) == nil) + } + + writeUint32(uint32(pw.version)) + + if pw.version.Has(Flags) { + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) + } + + // Write elemEndsEnds. + var sum uint32 + for _, elems := range &pw.elems { + sum += uint32(len(elems)) + writeUint32(sum) + } + + // Write elemEnds. + sum = 0 + for _, elems := range &pw.elems { + for _, elem := range elems { + sum += uint32(len(elem)) + writeUint32(sum) + } + } + + // Write elemData. + for _, elems := range &pw.elems { + for _, elem := range elems { + _, err := io.WriteString(out, elem) + assert(err == nil) + } + } + + // Write fingerprint. 
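DumpTo's running sums are easiest to see in miniature; a runnable toy (not part of this patch; section contents invented) deriving the cumulative counts and byte offsets it writes:

```go
package main

import "fmt"

func main() {
	// DumpTo serializes section boundaries as running sums: first the
	// cumulative element counts per section (elemEndsEnds), then the
	// cumulative byte lengths per element (elemEnds).
	elems := [][]string{{"ab", "c"}, {"defg"}} // two toy sections
	var counts []uint32
	var sum uint32
	for _, sec := range elems {
		sum += uint32(len(sec))
		counts = append(counts, sum)
	}
	var ends []uint32
	sum = 0
	for _, sec := range elems {
		for _, e := range sec {
			sum += uint32(len(e))
			ends = append(ends, sum)
		}
	}
	fmt.Println(counts, ends) // [2 3] [2 3 7]
}
```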
+ copy(fingerprint[:], h.Sum(nil)) + _, err := out0.Write(fingerprint[:]) + assert(err == nil) + + return +} + +// StringIdx adds a string value to the strings section, if not +// already present, and returns its index. +func (pw *PkgEncoder) StringIdx(s string) Index { + if idx, ok := pw.stringsIdx[s]; ok { + assert(pw.elems[RelocString][idx] == s) + return idx + } + + idx := Index(len(pw.elems[RelocString])) + pw.elems[RelocString] = append(pw.elems[RelocString], s) + pw.stringsIdx[s] = idx + return idx +} + +// NewEncoder returns an Encoder for a new element within the given +// section, and encodes the given SyncMarker as the start of the +// element bitstream. +func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { + e := pw.NewEncoderRaw(k) + e.Sync(marker) + return e +} + +// NewEncoderRaw returns an Encoder for a new element within the given +// section. +// +// Most callers should use NewEncoder instead. +func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { + idx := Index(len(pw.elems[k])) + pw.elems[k] = append(pw.elems[k], "") // placeholder + + return Encoder{ + p: pw, + k: k, + Idx: idx, + } +} + +// An Encoder provides methods for encoding an individual element's +// bitstream data. +type Encoder struct { + p *PkgEncoder + + Relocs []RelocEnt + RelocMap map[RelocEnt]uint32 + Data bytes.Buffer // accumulated element bitstream data + + encodingRelocHeader bool + + k RelocKind + Idx Index // index within relocation section +} + +// Flush finalizes the element's bitstream and returns its Index. +func (w *Encoder) Flush() Index { + var sb strings.Builder + + // Backup the data so we write the relocations at the front. + var tmp bytes.Buffer + io.Copy(&tmp, &w.Data) + + // TODO(mdempsky): Consider writing these out separately so they're + // easier to strip, along with function bodies, so that we can prune + // down to just the data that's relevant to go/types. + if w.encodingRelocHeader { + panic("encodingRelocHeader already true; recursive flush?") + } + w.encodingRelocHeader = true + w.Sync(SyncRelocs) + w.Len(len(w.Relocs)) + for _, rEnt := range w.Relocs { + w.Sync(SyncReloc) + w.Len(int(rEnt.Kind)) + w.Len(int(rEnt.Idx)) + } + + io.Copy(&sb, &w.Data) + io.Copy(&sb, &tmp) + w.p.elems[w.k][w.Idx] = sb.String() + + return w.Idx +} + +func (w *Encoder) checkErr(err error) { + if err != nil { + panicf("unexpected encoding error: %v", err) + } +} + +func (w *Encoder) rawUvarint(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + _, err := w.Data.Write(buf[:n]) + w.checkErr(err) +} + +func (w *Encoder) rawVarint(x int64) { + // Zig-zag encode. + ux := uint64(x) << 1 + if x < 0 { + ux = ^ux + } + + w.rawUvarint(ux) +} + +func (w *Encoder) rawReloc(r RelocKind, idx Index) int { + e := RelocEnt{r, idx} + if w.RelocMap != nil { + if i, ok := w.RelocMap[e]; ok { + return int(i) + } + } else { + w.RelocMap = make(map[RelocEnt]uint32) + } + + i := len(w.Relocs) + w.RelocMap[e] = uint32(i) + w.Relocs = append(w.Relocs, e) + return i +} + +func (w *Encoder) Sync(m SyncMarker) { + if !w.p.SyncMarkers() { + return + } + + // Writing out stack frame string references requires working + // relocations, but writing out the relocations themselves involves + // sync markers. To prevent infinite recursion, we simply trim the + // stack frame for sync markers within the relocation header. 
+ var frames []string + if !w.encodingRelocHeader && w.p.syncFrames > 0 { + pcs := make([]uintptr, w.p.syncFrames) + n := runtime.Callers(2, pcs) + frames = fmtFrames(pcs[:n]...) + } + + // TODO(mdempsky): Save space by writing out stack frames as a + // linked list so we can share common stack frames. + w.rawUvarint(uint64(m)) + w.rawUvarint(uint64(len(frames))) + for _, frame := range frames { + w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) + } +} + +// Bool encodes and writes a bool value into the element bitstream, +// and then returns the bool value. +// +// For simple, 2-alternative encodings, the idiomatic way to call Bool +// is something like: +// +// if w.Bool(x != 0) { +// // alternative #1 +// } else { +// // alternative #2 +// } +// +// For multi-alternative encodings, use Code instead. +func (w *Encoder) Bool(b bool) bool { + w.Sync(SyncBool) + var x byte + if b { + x = 1 + } + err := w.Data.WriteByte(x) + w.checkErr(err) + return b +} + +// Int64 encodes and writes an int64 value into the element bitstream. +func (w *Encoder) Int64(x int64) { + w.Sync(SyncInt64) + w.rawVarint(x) +} + +// Uint64 encodes and writes a uint64 value into the element bitstream. +func (w *Encoder) Uint64(x uint64) { + w.Sync(SyncUint64) + w.rawUvarint(x) +} + +// Len encodes and writes a non-negative int value into the element bitstream. +func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } + +// Int encodes and writes an int value into the element bitstream. +func (w *Encoder) Int(x int) { w.Int64(int64(x)) } + +// Uint encodes and writes a uint value into the element bitstream. +func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } + +// Reloc encodes and writes a relocation for the given (section, +// index) pair into the element bitstream. +// +// Note: Only the index is formally written into the element +// bitstream, so bitstream decoders must know from context which +// section an encoded relocation refers to. +func (w *Encoder) Reloc(r RelocKind, idx Index) { + w.Sync(SyncUseReloc) + w.Len(w.rawReloc(r, idx)) +} + +// Code encodes and writes a Code value into the element bitstream. +func (w *Encoder) Code(c Code) { + w.Sync(c.Marker()) + w.Len(c.Value()) +} + +// String encodes and writes a string value into the element +// bitstream. +// +// Internally, strings are deduplicated by adding them to the strings +// section (if not already present), and then writing a relocation +// into the element bitstream. +func (w *Encoder) String(s string) { + w.StringRef(w.p.StringIdx(s)) +} + +// StringRef writes a reference to the given index, which must be a +// previously encoded string value. +func (w *Encoder) StringRef(idx Index) { + w.Sync(SyncString) + w.Reloc(RelocString, idx) +} + +// Strings encodes and writes a variable-length slice of strings into +// the element bitstream. +func (w *Encoder) Strings(ss []string) { + w.Len(len(ss)) + for _, s := range ss { + w.String(s) + } +} + +// Value encodes and writes a constant.Value into the element +// bitstream. 
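The Bool idiom documented above keeps writer and reader branches in lockstep; a runnable toy mirror (not part of this patch; the stream type is invented) of that pairing:

```go
package main

import "fmt"

// A toy mirror of the w.Bool(...)/r.Bool() pairing: the writer records
// which branch it took, and the reader replays the same branch, keeping
// both sides of the bitstream synchronized.
type stream struct{ bits []bool }

func (s *stream) writeBool(b bool) bool { s.bits = append(s.bits, b); return b }
func (s *stream) readBool() bool        { b := s.bits[0]; s.bits = s.bits[1:]; return b }

func main() {
	var s stream

	// Writer side (compare Encoder.Value's isComplex branch).
	if s.writeBool(true) {
		// alternative #1: more payload would be written here
	}

	// Reader side must take the matching branch.
	if s.readBool() {
		fmt.Println("alternative #1, as written")
	}
}
```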
+func (w *Encoder) Value(val constant.Value) { + w.Sync(SyncValue) + if w.Bool(val.Kind() == constant.Complex) { + w.scalar(constant.Real(val)) + w.scalar(constant.Imag(val)) + } else { + w.scalar(val) + } +} + +func (w *Encoder) scalar(val constant.Value) { + switch v := constant.Val(val).(type) { + default: + panicf("unhandled %v (%v)", val, val.Kind()) + case bool: + w.Code(ValBool) + w.Bool(v) + case string: + w.Code(ValString) + w.String(v) + case int64: + w.Code(ValInt64) + w.Int64(v) + case *big.Int: + w.Code(ValBigInt) + w.bigInt(v) + case *big.Rat: + w.Code(ValBigRat) + w.bigInt(v.Num()) + w.bigInt(v.Denom()) + case *big.Float: + w.Code(ValBigFloat) + w.bigFloat(v) + } +} + +func (w *Encoder) bigInt(v *big.Int) { + b := v.Bytes() + w.String(string(b)) // TODO: More efficient encoding. + w.Bool(v.Sign() < 0) +} + +func (w *Encoder) bigFloat(v *big.Float) { + b := v.Append(nil, 'p', -1) + w.String(string(b)) // TODO: More efficient encoding. +} + +// Version reports the version of the bitstream. +func (w *Encoder) Version() Version { return w.p.version } diff --git a/openshift/vendor/golang.org/x/tools/internal/pkgbits/flags.go b/openshift/vendor/golang.org/x/tools/internal/pkgbits/flags.go new file mode 100644 index 0000000000..654222745f --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/pkgbits/flags.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +const ( + flagSyncMarkers = 1 << iota // file format contains sync markers +) diff --git a/openshift/vendor/golang.org/x/tools/internal/pkgbits/reloc.go b/openshift/vendor/golang.org/x/tools/internal/pkgbits/reloc.go new file mode 100644 index 0000000000..fcdfb97ca9 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/pkgbits/reloc.go @@ -0,0 +1,42 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A RelocKind indicates a particular section within a unified IR export. +type RelocKind int32 + +// An Index represents a bitstream element index within a particular +// section. +type Index int32 + +// A relocEnt (relocation entry) is an entry in an element's local +// reference table. +// +// TODO(mdempsky): Rename this too. +type RelocEnt struct { + Kind RelocKind + Idx Index +} + +// Reserved indices within the meta relocation section. +const ( + PublicRootIdx Index = 0 + PrivateRootIdx Index = 1 +) + +const ( + RelocString RelocKind = iota + RelocMeta + RelocPosBase + RelocPkg + RelocName + RelocType + RelocObj + RelocObjExt + RelocObjDict + RelocBody + + numRelocs = iota +) diff --git a/openshift/vendor/golang.org/x/tools/internal/pkgbits/support.go b/openshift/vendor/golang.org/x/tools/internal/pkgbits/support.go new file mode 100644 index 0000000000..50534a2955 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pkgbits + +import "fmt" + +func assert(b bool) { + if !b { + panic("assertion failed") + } +} + +func panicf(format string, args ...any) { + panic(fmt.Errorf(format, args...)) +} diff --git a/openshift/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/openshift/vendor/golang.org/x/tools/internal/pkgbits/sync.go new file mode 100644 index 0000000000..1520b73afb --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -0,0 +1,136 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "fmt" + "runtime" + "strings" +) + +// fmtFrames formats a backtrace for reporting reader/writer desyncs. +func fmtFrames(pcs ...uintptr) []string { + res := make([]string, 0, len(pcs)) + walkFrames(pcs, func(file string, line int, name string, offset uintptr) { + // Trim package from function name. It's just redundant noise. + name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") + + res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) + }) + return res +} + +type frameVisitor func(file string, line int, name string, offset uintptr) + +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} + +// SyncMarker is an enum type that represents markers that may be +// written to export data to ensure the reader and writer stay +// synchronized. +type SyncMarker int + +//go:generate stringer -type=SyncMarker -trimprefix=Sync + +const ( + _ SyncMarker = iota + + // Public markers (known to go/types importers). + + // Low-level coding markers. + SyncEOF + SyncBool + SyncInt64 + SyncUint64 + SyncString + SyncValue + SyncVal + SyncRelocs + SyncReloc + SyncUseReloc + + // Higher-level object and type markers. + SyncPublic + SyncPos + SyncPosBase + SyncObject + SyncObject1 + SyncPkg + SyncPkgDef + SyncMethod + SyncType + SyncTypeIdx + SyncTypeParamNames + SyncSignature + SyncParams + SyncParam + SyncCodeObj + SyncSym + SyncLocalIdent + SyncSelector + + // Private markers (only known to cmd/compile). + SyncPrivate + + SyncFuncExt + SyncVarExt + SyncTypeExt + SyncPragma + + SyncExprList + SyncExprs + SyncExpr + SyncExprType + SyncAssign + SyncOp + SyncFuncLit + SyncCompLit + + SyncDecl + SyncFuncBody + SyncOpenScope + SyncCloseScope + SyncCloseAnotherScope + SyncDeclNames + SyncDeclName + + SyncStmts + SyncBlockStmt + SyncIfStmt + SyncForStmt + SyncSwitchStmt + SyncRangeStmt + SyncCaseClause + SyncCommClause + SyncSelectStmt + SyncDecls + SyncLabeledStmt + SyncUseObjLocal + SyncAddLocal + SyncLinkname + SyncStmt1 + SyncStmtsEnd + SyncLabel + SyncOptLabel + + SyncMultiExpr + SyncRType + SyncConvRTTI +) diff --git a/openshift/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/openshift/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go new file mode 100644 index 0000000000..582ad56d3e --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -0,0 +1,92 @@ +// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. 
+ +package pkgbits + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SyncEOF-1] + _ = x[SyncBool-2] + _ = x[SyncInt64-3] + _ = x[SyncUint64-4] + _ = x[SyncString-5] + _ = x[SyncValue-6] + _ = x[SyncVal-7] + _ = x[SyncRelocs-8] + _ = x[SyncReloc-9] + _ = x[SyncUseReloc-10] + _ = x[SyncPublic-11] + _ = x[SyncPos-12] + _ = x[SyncPosBase-13] + _ = x[SyncObject-14] + _ = x[SyncObject1-15] + _ = x[SyncPkg-16] + _ = x[SyncPkgDef-17] + _ = x[SyncMethod-18] + _ = x[SyncType-19] + _ = x[SyncTypeIdx-20] + _ = x[SyncTypeParamNames-21] + _ = x[SyncSignature-22] + _ = x[SyncParams-23] + _ = x[SyncParam-24] + _ = x[SyncCodeObj-25] + _ = x[SyncSym-26] + _ = x[SyncLocalIdent-27] + _ = x[SyncSelector-28] + _ = x[SyncPrivate-29] + _ = x[SyncFuncExt-30] + _ = x[SyncVarExt-31] + _ = x[SyncTypeExt-32] + _ = x[SyncPragma-33] + _ = x[SyncExprList-34] + _ = x[SyncExprs-35] + _ = x[SyncExpr-36] + _ = x[SyncExprType-37] + _ = x[SyncAssign-38] + _ = x[SyncOp-39] + _ = x[SyncFuncLit-40] + _ = x[SyncCompLit-41] + _ = x[SyncDecl-42] + _ = x[SyncFuncBody-43] + _ = x[SyncOpenScope-44] + _ = x[SyncCloseScope-45] + _ = x[SyncCloseAnotherScope-46] + _ = x[SyncDeclNames-47] + _ = x[SyncDeclName-48] + _ = x[SyncStmts-49] + _ = x[SyncBlockStmt-50] + _ = x[SyncIfStmt-51] + _ = x[SyncForStmt-52] + _ = x[SyncSwitchStmt-53] + _ = x[SyncRangeStmt-54] + _ = x[SyncCaseClause-55] + _ = x[SyncCommClause-56] + _ = x[SyncSelectStmt-57] + _ = x[SyncDecls-58] + _ = x[SyncLabeledStmt-59] + _ = x[SyncUseObjLocal-60] + _ = x[SyncAddLocal-61] + _ = x[SyncLinkname-62] + _ = x[SyncStmt1-63] + _ = x[SyncStmtsEnd-64] + _ = x[SyncLabel-65] + _ = x[SyncOptLabel-66] + _ = x[SyncMultiExpr-67] + _ = x[SyncRType-68] + _ = x[SyncConvRTTI-69] +} + +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI" + +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480} + +func (i SyncMarker) String() string { + i -= 1 + if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { + return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] +} diff --git a/openshift/vendor/golang.org/x/tools/internal/pkgbits/version.go b/openshift/vendor/golang.org/x/tools/internal/pkgbits/version.go new file mode 100644 index 0000000000..53af9df22b --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/pkgbits/version.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// Version indicates a version of a unified IR bitstream. 
+// Each Version indicates the addition, removal, or change of +// new data in the bitstream. +// +// These are serialized to disk and the interpretation remains fixed. +type Version uint32 + +const ( + // V0: initial prototype. + // + // All data that is not assigned a Field is in version V0 + // and has not been deprecated. + V0 Version = iota + + // V1: adds the Flags uint32 word + V1 + + // V2: removes unused legacy fields and supports type parameters for aliases. + // - remove the legacy "has init" bool from the public root + // - remove obj's "derived func instance" bool + // - add a TypeParamNames field to ObjAlias + // - remove derived info "needed" bool + V2 + + numVersions = iota +) + +// Field denotes a unit of data in the serialized unified IR bitstream. +// It is conceptually a like field in a structure. +// +// We only really need Fields when the data may or may not be present +// in a stream based on the Version of the bitstream. +// +// Unlike much of pkgbits, Fields are not serialized and +// can change values as needed. +type Field int + +const ( + // Flags in a uint32 in the header of a bitstream + // that is used to indicate whether optional features are enabled. + Flags Field = iota + + // Deprecated: HasInit was a bool indicating whether a package + // has any init functions. + HasInit + + // Deprecated: DerivedFuncInstance was a bool indicating + // whether an object was a function instance. + DerivedFuncInstance + + // ObjAlias has a list of TypeParamNames. + AliasTypeParamNames + + // Deprecated: DerivedInfoNeeded was a bool indicating + // whether a type was a derived type. + DerivedInfoNeeded + + numFields = iota +) + +// introduced is the version a field was added. +var introduced = [numFields]Version{ + Flags: V1, + AliasTypeParamNames: V2, +} + +// removed is the version a field was removed in or 0 for fields +// that have not yet been deprecated. +// (So removed[f]-1 is the last version it is included in.) +var removed = [numFields]Version{ + HasInit: V2, + DerivedFuncInstance: V2, + DerivedInfoNeeded: V2, +} + +// Has reports whether field f is present in a bitstream at version v. +func (v Version) Has(f Field) bool { + return introduced[f] <= v && (v < removed[f] || removed[f] == V0) +} diff --git a/openshift/vendor/golang.org/x/tools/internal/stdlib/deps.go b/openshift/vendor/golang.org/x/tools/internal/stdlib/deps.go new file mode 100644 index 0000000000..77cf8d2181 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -0,0 +1,359 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate.go. DO NOT EDIT. 
+ +package stdlib + +type pkginfo struct { + name string + deps string // list of indices of dependencies, as varint-encoded deltas +} + +var deps = [...]pkginfo{ + {"archive/tar", "\x03j\x03E5\x01\v\x01#\x01\x01\x02\x05\n\x02\x01\x02\x02\v"}, + {"archive/zip", "\x02\x04`\a\x16\x0205\x01+\x05\x01\x11\x03\x02\r\x04"}, + {"bufio", "\x03j}F\x13"}, + {"bytes", "m+R\x03\fH\x02\x02"}, + {"cmp", ""}, + {"compress/bzip2", "\x02\x02\xe6\x01C"}, + {"compress/flate", "\x02k\x03z\r\x025\x01\x03"}, + {"compress/gzip", "\x02\x04`\a\x03\x15eU"}, + {"compress/lzw", "\x02k\x03z"}, + {"compress/zlib", "\x02\x04`\a\x03\x13\x01f"}, + {"container/heap", "\xae\x02"}, + {"container/list", ""}, + {"container/ring", ""}, + {"context", "m\\i\x01\f"}, + {"crypto", "\x83\x01gE"}, + {"crypto/aes", "\x10\n\a\x8e\x02"}, + {"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1c,Q"}, + {"crypto/des", "\x10\x13\x1d-,\x96\x01\x03"}, + {"crypto/dsa", "@\x04)}\x0e"}, + {"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1c}"}, + {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1c}\x0e\x04L\x01"}, + {"crypto/ed25519", "\x0e\x1c\x16\n\a\x1c}E"}, + {"crypto/elliptic", "0=}\x0e:"}, + {"crypto/fips140", " \x05\x90\x01"}, + {"crypto/hkdf", "-\x12\x01-\x16"}, + {"crypto/hmac", "\x1a\x14\x11\x01\x112"}, + {"crypto/internal/boring", "\x0e\x02\rf"}, + {"crypto/internal/boring/bbig", "\x1a\xde\x01M"}, + {"crypto/internal/boring/bcache", "\xb3\x02\x12"}, + {"crypto/internal/boring/sig", ""}, + {"crypto/internal/cryptotest", "\x03\r\n)\x0e\x19\x06\x13\x12#\a\t\x11\x11\x11\x1b\x01\f\r\x05\n"}, + {"crypto/internal/entropy", "E"}, + {"crypto/internal/fips140", ">/}9\r\x15"}, + {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05*\x8c\x016"}, + {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06*\x8a\x01"}, + {"crypto/internal/fips140/alias", "\xc5\x02"}, + {"crypto/internal/fips140/bigmod", "%\x17\x01\x06*\x8c\x01"}, + {"crypto/internal/fips140/check", " \x0e\x06\b\x02\xac\x01["}, + {"crypto/internal/fips140/check/checktest", "%\xfe\x01\""}, + {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01(}\x0f9"}, + {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f1}\x0f9"}, + {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x067}H"}, + {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v7\xc2\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "%\a\f\x041\x8c\x019"}, + {"crypto/internal/fips140/edwards25519/field", "%\x13\x041\x8c\x01"}, + {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x069"}, + {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x017"}, + {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x041"}, + {"crypto/internal/fips140/nistec", "%\f\a\x041\x8c\x01*\x0f\x13"}, + {"crypto/internal/fips140/nistec/fiat", "%\x135\x8c\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x069"}, + {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x025}H"}, + {"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06*\x8c\x01"}, + {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x010\x8c\x01L"}, + {"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06*\x8c\x01"}, + {"crypto/internal/fips140/ssh", " \x05"}, + {"crypto/internal/fips140/subtle", "#"}, + {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x027"}, + {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\b1"}, + {"crypto/internal/fips140deps", ""}, + {"crypto/internal/fips140deps/byteorder", "\x99\x01"}, + {"crypto/internal/fips140deps/cpu", "\xad\x01\a"}, + 
{"crypto/internal/fips140deps/godebug", "\xb5\x01"}, + {"crypto/internal/fips140hash", "5\x1a4\xc2\x01"}, + {"crypto/internal/fips140only", "'\r\x01\x01M25"}, + {"crypto/internal/fips140test", ""}, + {"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d#,`N"}, + {"crypto/internal/impl", "\xb0\x02"}, + {"crypto/internal/randutil", "\xea\x01\x12"}, + {"crypto/internal/sysrand", "mi!\x1f\r\x0f\x01\x01\v\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "m"}, + {"crypto/md5", "\x0e2-\x16\x16`"}, + {"crypto/mlkem", "/"}, + {"crypto/pbkdf2", "2\r\x01-\x16"}, + {"crypto/rand", "\x1a\x06\a\x19\x04\x01(}\x0eM"}, + {"crypto/rc4", "#\x1d-\xc2\x01"}, + {"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1c\x03\x1325\r\x01"}, + {"crypto/sha1", "\x0e\f&-\x16\x16\x14L"}, + {"crypto/sha256", "\x0e\f\x1aO"}, + {"crypto/sha3", "\x0e'N\xc2\x01"}, + {"crypto/sha512", "\x0e\f\x1cM"}, + {"crypto/subtle", "8\x96\x01U"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x13\x16\x14\b5\x16\x16\r\n\x01\x01\x01\x02\x01\f\x06\x02\x01"}, + {"crypto/tls/internal/fips140tls", " \x93\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x032\x01\x02\t\x01\x01\x01\a\x10\x05\x01\x06\x02\x05\f\x01\x02\r\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/pkix", "c\x06\a\x88\x01G"}, + {"database/sql", "\x03\nJ\x16\x03z\f\x06\"\x05\n\x02\x03\x01\f\x02\x02\x02"}, + {"database/sql/driver", "\r`\x03\xae\x01\x11\x10"}, + {"debug/buildinfo", "\x03W\x02\x01\x01\b\a\x03`\x18\x02\x01+\x0f "}, + {"debug/dwarf", "\x03c\a\x03z1\x13\x01\x01"}, + {"debug/elf", "\x03\x06P\r\a\x03`\x19\x01,\x19\x01\x15"}, + {"debug/gosym", "\x03c\n\xbe\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06P\r\n`\x1a,\x19\x01"}, + {"debug/pe", "\x03\x06P\r\a\x03`\x1a,\x19\x01\x15"}, + {"debug/plan9obj", "f\a\x03`\x1a,"}, + {"embed", "m+:\x18\x01T"}, + {"embed/internal/embedtest", ""}, + {"encoding", ""}, + {"encoding/ascii85", "\xea\x01E"}, + {"encoding/asn1", "\x03j\x03\x87\x01\x01&\x0f\x02\x01\x0f\x03\x01"}, + {"encoding/base32", "\xea\x01C\x02"}, + {"encoding/base64", "\x99\x01QC\x02"}, + {"encoding/binary", "m}\r'\x0f\x05"}, + {"encoding/csv", "\x02\x01j\x03zF\x11\x02"}, + {"encoding/gob", "\x02_\x05\a\x03`\x1a\f\x01\x02\x1d\b\x14\x01\x0e\x02"}, + {"encoding/hex", "m\x03zC\x03"}, + {"encoding/json", "\x03\x01]\x04\b\x03z\r'\x0f\x02\x01\x02\x0f\x01\x01\x02"}, + {"encoding/pem", "\x03b\b}C\x03"}, + {"encoding/xml", "\x02\x01^\f\x03z4\x05\f\x01\x02\x0f\x02"}, + {"errors", "\xc9\x01|"}, + {"expvar", "jK9\t\n\x15\r\n\x02\x03\x01\x10"}, + {"flag", "a\f\x03z,\b\x05\n\x02\x01\x0f"}, + {"fmt", "mE8\r\x1f\b\x0f\x02\x03\x11"}, + {"go/ast", "\x03\x01l\x0f\x01j\x03)\b\x0f\x02\x01"}, + {"go/ast/internal/tests", ""}, + {"go/build", "\x02\x01j\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x12\x01+\x01\x04\x01\a\n\x02\x01\x11\x02\x02"}, + {"go/build/constraint", "m\xc2\x01\x01\x11\x02"}, + {"go/constant", "p\x10w\x01\x016\x01\x02\x11"}, + {"go/doc", "\x04l\x01\x06\t=-1\x12\x02\x01\x11\x02"}, + {"go/doc/comment", "\x03m\xbd\x01\x01\x01\x01\x11\x02"}, + {"go/format", "\x03m\x01\f\x01\x02jF"}, + {"go/importer", "s\a\x01\x01\x04\x01i9"}, + {"go/internal/gccgoimporter", "\x02\x01W\x13\x03\x05\v\x01g\x02,\x01\x05\x13\x01\v\b"}, + {"go/internal/gcimporter", "\x02n\x10\x01/\x05\x0e',\x17\x03\x02"}, + {"go/internal/srcimporter", "p\x01\x02\n\x03\x01i,\x01\x05\x14\x02\x13"}, + {"go/parser", "\x03j\x03\x01\x03\v\x01j\x01+\x06\x14"}, + {"go/printer", 
"p\x01\x03\x03\tj\r\x1f\x17\x02\x01\x02\n\x05\x02"}, + {"go/scanner", "\x03m\x10j2\x12\x01\x12\x02"}, + {"go/token", "\x04l\xbd\x01\x02\x03\x01\x0e\x02"}, + {"go/types", "\x03\x01\x06c\x03\x01\x04\b\x03\x02\x15\x1e\x06+\x04\x03\n%\a\n\x01\x01\x01\x02\x01\x0e\x02\x02"}, + {"go/version", "\xba\x01v"}, + {"hash", "\xea\x01"}, + {"hash/adler32", "m\x16\x16"}, + {"hash/crc32", "m\x16\x16\x14\x85\x01\x01\x12"}, + {"hash/crc64", "m\x16\x16\x99\x01"}, + {"hash/fnv", "m\x16\x16`"}, + {"hash/maphash", "\x94\x01\x05\x1b\x03@N"}, + {"html", "\xb0\x02\x02\x11"}, + {"html/template", "\x03g\x06\x19,5\x01\v \x05\x01\x02\x03\x0e\x01\x02\v\x01\x03\x02"}, + {"image", "\x02k\x1f^\x0f6\x03\x01"}, + {"image/color", ""}, + {"image/color/palette", "\x8c\x01"}, + {"image/draw", "\x8b\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05e\x03\x1b\x01\x01\x01\vQ"}, + {"image/internal/imageutil", "\x8b\x01"}, + {"image/jpeg", "\x02k\x1e\x01\x04Z"}, + {"image/png", "\x02\a]\n\x13\x02\x06\x01^E"}, + {"index/suffixarray", "\x03c\a}\r*\f\x01"}, + {"internal/abi", "\xb4\x01\x91\x01"}, + {"internal/asan", "\xc5\x02"}, + {"internal/bisect", "\xa3\x02\x0f\x01"}, + {"internal/buildcfg", "pG_\x06\x02\x05\f\x01"}, + {"internal/bytealg", "\xad\x01\x98\x01"}, + {"internal/byteorder", ""}, + {"internal/cfg", ""}, + {"internal/chacha8rand", "\x99\x01\x1b\x91\x01"}, + {"internal/copyright", ""}, + {"internal/coverage", ""}, + {"internal/coverage/calloc", ""}, + {"internal/coverage/cfile", "j\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x01\x1f,\x06\a\f\x01\x03\f\x06"}, + {"internal/coverage/cformat", "\x04l-\x04I\f7\x01\x02\f"}, + {"internal/coverage/cmerge", "p-Z"}, + {"internal/coverage/decodecounter", "f\n-\v\x02@,\x19\x16"}, + {"internal/coverage/decodemeta", "\x02d\n\x17\x16\v\x02@,"}, + {"internal/coverage/encodecounter", "\x02d\n-\f\x01\x02>\f \x17"}, + {"internal/coverage/encodemeta", "\x02\x01c\n\x13\x04\x16\r\x02>,/"}, + {"internal/coverage/pods", "\x04l-y\x06\x05\f\x02\x01"}, + {"internal/coverage/rtcov", "\xc5\x02"}, + {"internal/coverage/slicereader", "f\nz["}, + {"internal/coverage/slicewriter", "pz"}, + {"internal/coverage/stringtab", "p8\x04>"}, + {"internal/coverage/test", ""}, + {"internal/coverage/uleb128", ""}, + {"internal/cpu", "\xc5\x02"}, + {"internal/dag", "\x04l\xbd\x01\x03"}, + {"internal/diff", "\x03m\xbe\x01\x02"}, + {"internal/exportdata", "\x02\x01j\x03\x03]\x1a,\x01\x05\x13\x01\x02"}, + {"internal/filepathlite", "m+:\x19B"}, + {"internal/fmtsort", "\x04\x9a\x02\x0f"}, + {"internal/fuzz", "\x03\nA\x18\x04\x03\x03\x01\f\x0355\r\x02\x1d\x01\x05\x02\x05\f\x01\x02\x01\x01\v\x04\x02"}, + {"internal/goarch", ""}, + {"internal/godebug", "\x96\x01 |\x01\x12"}, + {"internal/godebugs", ""}, + {"internal/goexperiment", ""}, + {"internal/goos", ""}, + {"internal/goroot", "\x96\x02\x01\x05\x14\x02"}, + {"internal/gover", "\x04"}, + {"internal/goversion", ""}, + {"internal/itoa", ""}, + {"internal/lazyregexp", "\x96\x02\v\x0f\x02"}, + {"internal/lazytemplate", "\xea\x01,\x1a\x02\v"}, + {"internal/msan", "\xc5\x02"}, + {"internal/nettrace", ""}, + {"internal/obscuretestdata", "e\x85\x01,"}, + {"internal/oserror", "m"}, + {"internal/pkgbits", "\x03K\x18\a\x03\x05\vj\x0e\x1e\r\f\x01"}, + {"internal/platform", ""}, + {"internal/poll", "mO\x1a\x149\x0f\x01\x01\v\x06"}, + {"internal/profile", "\x03\x04f\x03z7\r\x01\x01\x0f"}, + {"internal/profilerecord", ""}, + {"internal/race", "\x94\x01\xb1\x01"}, + {"internal/reflectlite", "\x94\x01 3<\""}, + {"internal/runtime/atomic", "\xc5\x02"}, + {"internal/runtime/exithook", 
"\xca\x01{"}, + {"internal/runtime/maps", "\x94\x01\x01\x1f\v\t\x05\x01w"}, + {"internal/runtime/math", "\xb4\x01"}, + {"internal/runtime/sys", "\xb4\x01\x04"}, + {"internal/runtime/syscall", "\xc5\x02"}, + {"internal/saferio", "\xea\x01["}, + {"internal/singleflight", "\xb2\x02"}, + {"internal/stringslite", "\x98\x01\xad\x01"}, + {"internal/sync", "\x94\x01 \x14k\x12"}, + {"internal/synctest", "\xc5\x02"}, + {"internal/syscall/execenv", "\xb4\x02"}, + {"internal/syscall/unix", "\xa3\x02\x10\x01\x11"}, + {"internal/sysinfo", "\x02\x01\xaa\x01=,\x1a\x02"}, + {"internal/syslist", ""}, + {"internal/testenv", "\x03\n`\x02\x01*\x1a\x10'+\x01\x05\a\f\x01\x02\x02\x01\n"}, + {"internal/testlog", "\xb2\x02\x01\x12"}, + {"internal/testpty", "m\x03\xa6\x01"}, + {"internal/trace", "\x02\x01\x01\x06\\\a\x03n\x03\x03\x06\x03\n6\x01\x02\x0f\x06"}, + {"internal/trace/internal/testgen", "\x03c\nl\x03\x02\x03\x011\v\x0f"}, + {"internal/trace/internal/tracev1", "\x03\x01b\a\x03t\x06\r6\x01"}, + {"internal/trace/raw", "\x02d\nq\x03\x06E\x01\x11"}, + {"internal/trace/testtrace", "\x02\x01j\x03l\x03\x06\x057\f\x02\x01"}, + {"internal/trace/tracev2", ""}, + {"internal/trace/traceviewer", "\x02]\v\x06\x1a<\x16\a\a\x04\t\n\x15\x01\x05\a\f\x01\x02\r"}, + {"internal/trace/traceviewer/format", ""}, + {"internal/trace/version", "pq\t"}, + {"internal/txtar", "\x03m\xa6\x01\x1a"}, + {"internal/types/errors", "\xaf\x02"}, + {"internal/unsafeheader", "\xc5\x02"}, + {"internal/xcoff", "Y\r\a\x03`\x1a,\x19\x01"}, + {"internal/zstd", "f\a\x03z\x0f"}, + {"io", "m\xc5\x01"}, + {"io/fs", "m+*(1\x12\x12\x04"}, + {"io/ioutil", "\xea\x01\x01+\x17\x03"}, + {"iter", "\xc8\x01[\""}, + {"log", "pz\x05'\r\x0f\x01\f"}, + {"log/internal", ""}, + {"log/slog", "\x03\nT\t\x03\x03z\x04\x01\x02\x02\x04'\x05\n\x02\x01\x02\x01\f\x02\x02\x02"}, + {"log/slog/internal", ""}, + {"log/slog/internal/benchmarks", "\r`\x03z\x06\x03<\x10"}, + {"log/slog/internal/buffer", "\xb2\x02"}, + {"log/slog/internal/slogtest", "\xf0\x01"}, + {"log/syslog", "m\x03~\x12\x16\x1a\x02\r"}, + {"maps", "\xed\x01X"}, + {"math", "\xad\x01LL"}, + {"math/big", "\x03j\x03)\x14=\r\x02\x024\x01\x02\x13"}, + {"math/bits", "\xc5\x02"}, + {"math/cmplx", "\xf7\x01\x02"}, + {"math/rand", "\xb5\x01B;\x01\x12"}, + {"math/rand/v2", "m,\x02\\\x02L"}, + {"mime", "\x02\x01b\b\x03z\f \x17\x03\x02\x0f\x02"}, + {"mime/multipart", "\x02\x01G#\x03E5\f\x01\x06\x02\x15\x02\x06\x11\x02\x01\x15"}, + {"mime/quotedprintable", "\x02\x01mz"}, + {"net", "\x04\t`+\x1d\a\x04\x05\f\x01\x04\x14\x01%\x06\r\n\x05\x01\x01\v\x06\a"}, + {"net/http", "\x02\x01\x04\x04\x02=\b\x13\x01\a\x03E5\x01\x03\b\x01\x02\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\n\x01\x01\x01\x02\x01\x01\v\x02\x02\x02\b\x01\x01\x01"}, + {"net/http/cgi", "\x02P\x1b\x03z\x04\b\n\x01\x13\x01\x01\x01\x04\x01\x05\x02\n\x02\x01\x0f\x0e"}, + {"net/http/cookiejar", "\x04i\x03\x90\x01\x01\b\f\x18\x03\x02\r\x04"}, + {"net/http/fcgi", "\x02\x01\nY\a\x03z\x16\x01\x01\x14\x1a\x02\r"}, + {"net/http/httptest", "\x02\x01\nE\x02\x1b\x01z\x04\x12\x01\n\t\x02\x19\x01\x02\r\x0e"}, + {"net/http/httptrace", "\rEn@\x14\n!"}, + {"net/http/httputil", "\x02\x01\n`\x03z\x04\x0f\x03\x01\x05\x02\x01\v\x01\x1b\x02\r\x0e"}, + {"net/http/internal", "\x02\x01j\x03z"}, + {"net/http/internal/ascii", "\xb0\x02\x11"}, + {"net/http/internal/httpcommon", "\r`\x03\x96\x01\x0e\x01\x19\x01\x01\x02\x1b\x02"}, + {"net/http/internal/testcert", "\xb0\x02"}, + {"net/http/pprof", "\x02\x01\nc\x19,\x11$\x04\x13\x14\x01\r\x06\x03\x01\x02\x01\x0f"}, + {"net/internal/cgotest", 
""}, + {"net/internal/socktest", "p\xc2\x01\x02"}, + {"net/mail", "\x02k\x03z\x04\x0f\x03\x14\x1c\x02\r\x04"}, + {"net/netip", "\x04i+\x01#;\x026\x15"}, + {"net/rpc", "\x02f\x05\x03\x10\n`\x04\x12\x01\x1d\x0f\x03\x02"}, + {"net/rpc/jsonrpc", "j\x03\x03z\x16\x11!"}, + {"net/smtp", "\x19.\v\x13\b\x03z\x16\x14\x1c"}, + {"net/textproto", "\x02\x01j\x03z\r\t/\x01\x02\x13"}, + {"net/url", "m\x03\x86\x01%\x12\x02\x01\x15"}, + {"os", "m+\x01\x18\x03\b\t\r\x03\x01\x04\x10\x018\n\x05\x01\x01\v\x06"}, + {"os/exec", "\x03\n`H \x01\x14\x01+\x06\a\f\x01\x04\v"}, + {"os/exec/internal/fdtest", "\xb4\x02"}, + {"os/signal", "\r\x89\x02\x17\x05\x02"}, + {"os/user", "\x02\x01j\x03z,\r\f\x01\x02"}, + {"path", "m+\xab\x01"}, + {"path/filepath", "m+\x19:+\r\n\x03\x04\x0f"}, + {"plugin", "m"}, + {"reflect", "m'\x04\x1c\b\f\x04\x02\x19\x10,\f\x03\x0f\x02\x02"}, + {"reflect/internal/example1", ""}, + {"reflect/internal/example2", ""}, + {"regexp", "\x03\xe7\x018\v\x02\x01\x02\x0f\x02"}, + {"regexp/syntax", "\xad\x02\x01\x01\x01\x11\x02"}, + {"runtime", "\x94\x01\x04\x01\x02\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x03\x0fd"}, + {"runtime/coverage", "\x9f\x01K"}, + {"runtime/debug", "pUQ\r\n\x02\x01\x0f\x06"}, + {"runtime/internal/startlinetest", ""}, + {"runtime/internal/wasitest", ""}, + {"runtime/metrics", "\xb6\x01A,\""}, + {"runtime/pprof", "\x02\x01\x01\x03\x06Y\a\x03$3#\r\x1f\r\n\x01\x01\x01\x02\x02\b\x03\x06"}, + {"runtime/race", "\xab\x02"}, + {"runtime/race/internal/amd64v1", ""}, + {"runtime/trace", "\rcz9\x0f\x01\x12"}, + {"slices", "\x04\xe9\x01\fL"}, + {"sort", "\xc9\x0104"}, + {"strconv", "m+:%\x02J"}, + {"strings", "m'\x04:\x18\x03\f9\x0f\x02\x02"}, + {"structs", ""}, + {"sync", "\xc8\x01\vP\x10\x12"}, + {"sync/atomic", "\xc5\x02"}, + {"syscall", "m(\x03\x01\x1b\b\x03\x03\x06\aT\n\x05\x01\x12"}, + {"testing", "\x03\n`\x02\x01X\x0f\x13\r\x04\x1b\x06\x02\x05\x02\a\x01\x02\x01\x02\x01\f\x02\x02\x02"}, + {"testing/fstest", "m\x03z\x01\v%\x12\x03\b\a"}, + {"testing/internal/testdeps", "\x02\v\xa6\x01'\x10,\x03\x05\x03\b\a\x02\r"}, + {"testing/iotest", "\x03j\x03z\x04"}, + {"testing/quick", "o\x01\x87\x01\x04#\x12\x0f"}, + {"testing/slogtest", "\r`\x03\x80\x01.\x05\x12\n"}, + {"text/scanner", "\x03mz,+\x02"}, + {"text/tabwriter", "pzY"}, + {"text/template", "m\x03B8\x01\v\x1f\x01\x05\x01\x02\x05\r\x02\f\x03\x02"}, + {"text/template/parse", "\x03m\xb3\x01\f\x01\x11\x02"}, + {"time", "m+\x1d\x1d'*\x0f\x02\x11"}, + {"time/tzdata", "m\xc7\x01\x11"}, + {"unicode", ""}, + {"unicode/utf16", ""}, + {"unicode/utf8", ""}, + {"unique", "\x94\x01>\x01P\x0f\x13\x12"}, + {"unsafe", ""}, + {"vendor/golang.org/x/crypto/chacha20", "\x10V\a\x8c\x01*'"}, + {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10V\a\xd9\x01\x04\x01\a"}, + {"vendor/golang.org/x/crypto/cryptobyte", "c\n\x03\x88\x01&!\n"}, + {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""}, + {"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "Q\x15\x93\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "m"}, + {"vendor/golang.org/x/net/http/httpguts", "\x80\x02\x14\x1c\x13\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "m\x03\x90\x01\x15\x01\x1a\x13\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03j\x03zH"}, + {"vendor/golang.org/x/net/idna", "p\x87\x019\x13\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03c\a\x03z\x11\x05\x16\x01\f\f\x01\x02\x02\x01\n"}, + {"vendor/golang.org/x/sys/cpu", "\x96\x02\r\f\x01\x15"}, + {"vendor/golang.org/x/text/secure/bidirule", "m\xd6\x01\x11\x01"}, 
+ {"vendor/golang.org/x/text/transform", "\x03j}Y"}, + {"vendor/golang.org/x/text/unicode/bidi", "\x03\be~@\x15"}, + {"vendor/golang.org/x/text/unicode/norm", "f\nzH\x11\x11"}, + {"weak", "\x94\x01\x8f\x01\""}, +} diff --git a/openshift/vendor/golang.org/x/tools/internal/stdlib/import.go b/openshift/vendor/golang.org/x/tools/internal/stdlib/import.go new file mode 100644 index 0000000000..f6909878a8 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/stdlib/import.go @@ -0,0 +1,89 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stdlib + +// This file provides the API for the import graph of the standard library. +// +// Be aware that the compiler-generated code for every package +// implicitly depends on package "runtime" and a handful of others +// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go). + +import ( + "encoding/binary" + "iter" + "slices" + "strings" +) + +// Imports returns the sequence of packages directly imported by the +// named standard packages, in name order. +// The imports of an unknown package are the empty set. +// +// The graph is built into the application and may differ from the +// graph in the Go source tree being analyzed by the application. +func Imports(pkgs ...string) iter.Seq[string] { + return func(yield func(string) bool) { + for _, pkg := range pkgs { + if i, ok := find(pkg); ok { + var depIndex uint64 + for data := []byte(deps[i].deps); len(data) > 0; { + delta, n := binary.Uvarint(data) + depIndex += delta + if !yield(deps[depIndex].name) { + return + } + data = data[n:] + } + } + } + } +} + +// Dependencies returns the set of all dependencies of the named +// standard packages, including the initial package, +// in a deterministic topological order. +// The dependencies of an unknown package are the empty set. +// +// The graph is built into the application and may differ from the +// graph in the Go source tree being analyzed by the application. +func Dependencies(pkgs ...string) iter.Seq[string] { + return func(yield func(string) bool) { + for _, pkg := range pkgs { + if i, ok := find(pkg); ok { + var seen [1 + len(deps)/8]byte // bit set of seen packages + var visit func(i int) bool + visit = func(i int) bool { + bit := byte(1) << (i % 8) + if seen[i/8]&bit == 0 { + seen[i/8] |= bit + var depIndex uint64 + for data := []byte(deps[i].deps); len(data) > 0; { + delta, n := binary.Uvarint(data) + depIndex += delta + if !visit(int(depIndex)) { + return false + } + data = data[n:] + } + if !yield(deps[i].name) { + return false + } + } + return true + } + if !visit(i) { + return + } + } + } + } +} + +// find returns the index of pkg in the deps table. +func find(pkg string) (int, bool) { + return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int { + return strings.Compare(p.name, n) + }) +} diff --git a/openshift/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/openshift/vendor/golang.org/x/tools/internal/stdlib/manifest.go new file mode 100644 index 0000000000..64f0326b64 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -0,0 +1,17676 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate.go. DO NOT EDIT. 
+ +package stdlib + +var PackageSymbols = map[string][]Symbol{ + "archive/tar": { + {"(*Header).FileInfo", Method, 1, ""}, + {"(*Reader).Next", Method, 0, ""}, + {"(*Reader).Read", Method, 0, ""}, + {"(*Writer).AddFS", Method, 22, ""}, + {"(*Writer).Close", Method, 0, ""}, + {"(*Writer).Flush", Method, 0, ""}, + {"(*Writer).Write", Method, 0, ""}, + {"(*Writer).WriteHeader", Method, 0, ""}, + {"(Format).String", Method, 10, ""}, + {"ErrFieldTooLong", Var, 0, ""}, + {"ErrHeader", Var, 0, ""}, + {"ErrInsecurePath", Var, 20, ""}, + {"ErrWriteAfterClose", Var, 0, ""}, + {"ErrWriteTooLong", Var, 0, ""}, + {"FileInfoHeader", Func, 1, "func(fi fs.FileInfo, link string) (*Header, error)"}, + {"FileInfoNames", Type, 23, ""}, + {"Format", Type, 10, ""}, + {"FormatGNU", Const, 10, ""}, + {"FormatPAX", Const, 10, ""}, + {"FormatUSTAR", Const, 10, ""}, + {"FormatUnknown", Const, 10, ""}, + {"Header", Type, 0, ""}, + {"Header.AccessTime", Field, 0, ""}, + {"Header.ChangeTime", Field, 0, ""}, + {"Header.Devmajor", Field, 0, ""}, + {"Header.Devminor", Field, 0, ""}, + {"Header.Format", Field, 10, ""}, + {"Header.Gid", Field, 0, ""}, + {"Header.Gname", Field, 0, ""}, + {"Header.Linkname", Field, 0, ""}, + {"Header.ModTime", Field, 0, ""}, + {"Header.Mode", Field, 0, ""}, + {"Header.Name", Field, 0, ""}, + {"Header.PAXRecords", Field, 10, ""}, + {"Header.Size", Field, 0, ""}, + {"Header.Typeflag", Field, 0, ""}, + {"Header.Uid", Field, 0, ""}, + {"Header.Uname", Field, 0, ""}, + {"Header.Xattrs", Field, 3, ""}, + {"NewReader", Func, 0, "func(r io.Reader) *Reader"}, + {"NewWriter", Func, 0, "func(w io.Writer) *Writer"}, + {"Reader", Type, 0, ""}, + {"TypeBlock", Const, 0, ""}, + {"TypeChar", Const, 0, ""}, + {"TypeCont", Const, 0, ""}, + {"TypeDir", Const, 0, ""}, + {"TypeFifo", Const, 0, ""}, + {"TypeGNULongLink", Const, 1, ""}, + {"TypeGNULongName", Const, 1, ""}, + {"TypeGNUSparse", Const, 3, ""}, + {"TypeLink", Const, 0, ""}, + {"TypeReg", Const, 0, ""}, + {"TypeRegA", Const, 0, ""}, + {"TypeSymlink", Const, 0, ""}, + {"TypeXGlobalHeader", Const, 0, ""}, + {"TypeXHeader", Const, 0, ""}, + {"Writer", Type, 0, ""}, + }, + "archive/zip": { + {"(*File).DataOffset", Method, 2, ""}, + {"(*File).FileInfo", Method, 0, ""}, + {"(*File).ModTime", Method, 0, ""}, + {"(*File).Mode", Method, 0, ""}, + {"(*File).Open", Method, 0, ""}, + {"(*File).OpenRaw", Method, 17, ""}, + {"(*File).SetModTime", Method, 0, ""}, + {"(*File).SetMode", Method, 0, ""}, + {"(*FileHeader).FileInfo", Method, 0, ""}, + {"(*FileHeader).ModTime", Method, 0, ""}, + {"(*FileHeader).Mode", Method, 0, ""}, + {"(*FileHeader).SetModTime", Method, 0, ""}, + {"(*FileHeader).SetMode", Method, 0, ""}, + {"(*ReadCloser).Close", Method, 0, ""}, + {"(*ReadCloser).Open", Method, 16, ""}, + {"(*ReadCloser).RegisterDecompressor", Method, 6, ""}, + {"(*Reader).Open", Method, 16, ""}, + {"(*Reader).RegisterDecompressor", Method, 6, ""}, + {"(*Writer).AddFS", Method, 22, ""}, + {"(*Writer).Close", Method, 0, ""}, + {"(*Writer).Copy", Method, 17, ""}, + {"(*Writer).Create", Method, 0, ""}, + {"(*Writer).CreateHeader", Method, 0, ""}, + {"(*Writer).CreateRaw", Method, 17, ""}, + {"(*Writer).Flush", Method, 4, ""}, + {"(*Writer).RegisterCompressor", Method, 6, ""}, + {"(*Writer).SetComment", Method, 10, ""}, + {"(*Writer).SetOffset", Method, 5, ""}, + {"Compressor", Type, 2, ""}, + {"Decompressor", Type, 2, ""}, + {"Deflate", Const, 0, ""}, + {"ErrAlgorithm", Var, 0, ""}, + {"ErrChecksum", Var, 0, ""}, + {"ErrFormat", Var, 0, ""}, + {"ErrInsecurePath", Var, 20, 
""}, + {"File", Type, 0, ""}, + {"File.FileHeader", Field, 0, ""}, + {"FileHeader", Type, 0, ""}, + {"FileHeader.CRC32", Field, 0, ""}, + {"FileHeader.Comment", Field, 0, ""}, + {"FileHeader.CompressedSize", Field, 0, ""}, + {"FileHeader.CompressedSize64", Field, 1, ""}, + {"FileHeader.CreatorVersion", Field, 0, ""}, + {"FileHeader.ExternalAttrs", Field, 0, ""}, + {"FileHeader.Extra", Field, 0, ""}, + {"FileHeader.Flags", Field, 0, ""}, + {"FileHeader.Method", Field, 0, ""}, + {"FileHeader.Modified", Field, 10, ""}, + {"FileHeader.ModifiedDate", Field, 0, ""}, + {"FileHeader.ModifiedTime", Field, 0, ""}, + {"FileHeader.Name", Field, 0, ""}, + {"FileHeader.NonUTF8", Field, 10, ""}, + {"FileHeader.ReaderVersion", Field, 0, ""}, + {"FileHeader.UncompressedSize", Field, 0, ""}, + {"FileHeader.UncompressedSize64", Field, 1, ""}, + {"FileInfoHeader", Func, 0, "func(fi fs.FileInfo) (*FileHeader, error)"}, + {"NewReader", Func, 0, "func(r io.ReaderAt, size int64) (*Reader, error)"}, + {"NewWriter", Func, 0, "func(w io.Writer) *Writer"}, + {"OpenReader", Func, 0, "func(name string) (*ReadCloser, error)"}, + {"ReadCloser", Type, 0, ""}, + {"ReadCloser.Reader", Field, 0, ""}, + {"Reader", Type, 0, ""}, + {"Reader.Comment", Field, 0, ""}, + {"Reader.File", Field, 0, ""}, + {"RegisterCompressor", Func, 2, "func(method uint16, comp Compressor)"}, + {"RegisterDecompressor", Func, 2, "func(method uint16, dcomp Decompressor)"}, + {"Store", Const, 0, ""}, + {"Writer", Type, 0, ""}, + }, + "bufio": { + {"(*Reader).Buffered", Method, 0, ""}, + {"(*Reader).Discard", Method, 5, ""}, + {"(*Reader).Peek", Method, 0, ""}, + {"(*Reader).Read", Method, 0, ""}, + {"(*Reader).ReadByte", Method, 0, ""}, + {"(*Reader).ReadBytes", Method, 0, ""}, + {"(*Reader).ReadLine", Method, 0, ""}, + {"(*Reader).ReadRune", Method, 0, ""}, + {"(*Reader).ReadSlice", Method, 0, ""}, + {"(*Reader).ReadString", Method, 0, ""}, + {"(*Reader).Reset", Method, 2, ""}, + {"(*Reader).Size", Method, 10, ""}, + {"(*Reader).UnreadByte", Method, 0, ""}, + {"(*Reader).UnreadRune", Method, 0, ""}, + {"(*Reader).WriteTo", Method, 1, ""}, + {"(*Scanner).Buffer", Method, 6, ""}, + {"(*Scanner).Bytes", Method, 1, ""}, + {"(*Scanner).Err", Method, 1, ""}, + {"(*Scanner).Scan", Method, 1, ""}, + {"(*Scanner).Split", Method, 1, ""}, + {"(*Scanner).Text", Method, 1, ""}, + {"(*Writer).Available", Method, 0, ""}, + {"(*Writer).AvailableBuffer", Method, 18, ""}, + {"(*Writer).Buffered", Method, 0, ""}, + {"(*Writer).Flush", Method, 0, ""}, + {"(*Writer).ReadFrom", Method, 1, ""}, + {"(*Writer).Reset", Method, 2, ""}, + {"(*Writer).Size", Method, 10, ""}, + {"(*Writer).Write", Method, 0, ""}, + {"(*Writer).WriteByte", Method, 0, ""}, + {"(*Writer).WriteRune", Method, 0, ""}, + {"(*Writer).WriteString", Method, 0, ""}, + {"(ReadWriter).Available", Method, 0, ""}, + {"(ReadWriter).AvailableBuffer", Method, 18, ""}, + {"(ReadWriter).Discard", Method, 5, ""}, + {"(ReadWriter).Flush", Method, 0, ""}, + {"(ReadWriter).Peek", Method, 0, ""}, + {"(ReadWriter).Read", Method, 0, ""}, + {"(ReadWriter).ReadByte", Method, 0, ""}, + {"(ReadWriter).ReadBytes", Method, 0, ""}, + {"(ReadWriter).ReadFrom", Method, 1, ""}, + {"(ReadWriter).ReadLine", Method, 0, ""}, + {"(ReadWriter).ReadRune", Method, 0, ""}, + {"(ReadWriter).ReadSlice", Method, 0, ""}, + {"(ReadWriter).ReadString", Method, 0, ""}, + {"(ReadWriter).UnreadByte", Method, 0, ""}, + {"(ReadWriter).UnreadRune", Method, 0, ""}, + {"(ReadWriter).Write", Method, 0, ""}, + {"(ReadWriter).WriteByte", Method, 0, ""}, + 
{"(ReadWriter).WriteRune", Method, 0, ""}, + {"(ReadWriter).WriteString", Method, 0, ""}, + {"(ReadWriter).WriteTo", Method, 1, ""}, + {"ErrAdvanceTooFar", Var, 1, ""}, + {"ErrBadReadCount", Var, 15, ""}, + {"ErrBufferFull", Var, 0, ""}, + {"ErrFinalToken", Var, 6, ""}, + {"ErrInvalidUnreadByte", Var, 0, ""}, + {"ErrInvalidUnreadRune", Var, 0, ""}, + {"ErrNegativeAdvance", Var, 1, ""}, + {"ErrNegativeCount", Var, 0, ""}, + {"ErrTooLong", Var, 1, ""}, + {"MaxScanTokenSize", Const, 1, ""}, + {"NewReadWriter", Func, 0, "func(r *Reader, w *Writer) *ReadWriter"}, + {"NewReader", Func, 0, "func(rd io.Reader) *Reader"}, + {"NewReaderSize", Func, 0, "func(rd io.Reader, size int) *Reader"}, + {"NewScanner", Func, 1, "func(r io.Reader) *Scanner"}, + {"NewWriter", Func, 0, "func(w io.Writer) *Writer"}, + {"NewWriterSize", Func, 0, "func(w io.Writer, size int) *Writer"}, + {"ReadWriter", Type, 0, ""}, + {"ReadWriter.Reader", Field, 0, ""}, + {"ReadWriter.Writer", Field, 0, ""}, + {"Reader", Type, 0, ""}, + {"ScanBytes", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"}, + {"ScanLines", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"}, + {"ScanRunes", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"}, + {"ScanWords", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"}, + {"Scanner", Type, 1, ""}, + {"SplitFunc", Type, 1, ""}, + {"Writer", Type, 0, ""}, + }, + "bytes": { + {"(*Buffer).Available", Method, 21, ""}, + {"(*Buffer).AvailableBuffer", Method, 21, ""}, + {"(*Buffer).Bytes", Method, 0, ""}, + {"(*Buffer).Cap", Method, 5, ""}, + {"(*Buffer).Grow", Method, 1, ""}, + {"(*Buffer).Len", Method, 0, ""}, + {"(*Buffer).Next", Method, 0, ""}, + {"(*Buffer).Read", Method, 0, ""}, + {"(*Buffer).ReadByte", Method, 0, ""}, + {"(*Buffer).ReadBytes", Method, 0, ""}, + {"(*Buffer).ReadFrom", Method, 0, ""}, + {"(*Buffer).ReadRune", Method, 0, ""}, + {"(*Buffer).ReadString", Method, 0, ""}, + {"(*Buffer).Reset", Method, 0, ""}, + {"(*Buffer).String", Method, 0, ""}, + {"(*Buffer).Truncate", Method, 0, ""}, + {"(*Buffer).UnreadByte", Method, 0, ""}, + {"(*Buffer).UnreadRune", Method, 0, ""}, + {"(*Buffer).Write", Method, 0, ""}, + {"(*Buffer).WriteByte", Method, 0, ""}, + {"(*Buffer).WriteRune", Method, 0, ""}, + {"(*Buffer).WriteString", Method, 0, ""}, + {"(*Buffer).WriteTo", Method, 0, ""}, + {"(*Reader).Len", Method, 0, ""}, + {"(*Reader).Read", Method, 0, ""}, + {"(*Reader).ReadAt", Method, 0, ""}, + {"(*Reader).ReadByte", Method, 0, ""}, + {"(*Reader).ReadRune", Method, 0, ""}, + {"(*Reader).Reset", Method, 7, ""}, + {"(*Reader).Seek", Method, 0, ""}, + {"(*Reader).Size", Method, 5, ""}, + {"(*Reader).UnreadByte", Method, 0, ""}, + {"(*Reader).UnreadRune", Method, 0, ""}, + {"(*Reader).WriteTo", Method, 1, ""}, + {"Buffer", Type, 0, ""}, + {"Clone", Func, 20, "func(b []byte) []byte"}, + {"Compare", Func, 0, "func(a []byte, b []byte) int"}, + {"Contains", Func, 0, "func(b []byte, subslice []byte) bool"}, + {"ContainsAny", Func, 7, "func(b []byte, chars string) bool"}, + {"ContainsFunc", Func, 21, "func(b []byte, f func(rune) bool) bool"}, + {"ContainsRune", Func, 7, "func(b []byte, r rune) bool"}, + {"Count", Func, 0, "func(s []byte, sep []byte) int"}, + {"Cut", Func, 18, "func(s []byte, sep []byte) (before []byte, after []byte, found bool)"}, + {"CutPrefix", Func, 20, "func(s []byte, prefix []byte) (after []byte, found bool)"}, + {"CutSuffix", Func, 20, "func(s []byte, suffix []byte) 
(before []byte, found bool)"}, + {"Equal", Func, 0, "func(a []byte, b []byte) bool"}, + {"EqualFold", Func, 0, "func(s []byte, t []byte) bool"}, + {"ErrTooLarge", Var, 0, ""}, + {"Fields", Func, 0, "func(s []byte) [][]byte"}, + {"FieldsFunc", Func, 0, "func(s []byte, f func(rune) bool) [][]byte"}, + {"FieldsFuncSeq", Func, 24, "func(s []byte, f func(rune) bool) iter.Seq[[]byte]"}, + {"FieldsSeq", Func, 24, "func(s []byte) iter.Seq[[]byte]"}, + {"HasPrefix", Func, 0, "func(s []byte, prefix []byte) bool"}, + {"HasSuffix", Func, 0, "func(s []byte, suffix []byte) bool"}, + {"Index", Func, 0, "func(s []byte, sep []byte) int"}, + {"IndexAny", Func, 0, "func(s []byte, chars string) int"}, + {"IndexByte", Func, 0, "func(b []byte, c byte) int"}, + {"IndexFunc", Func, 0, "func(s []byte, f func(r rune) bool) int"}, + {"IndexRune", Func, 0, "func(s []byte, r rune) int"}, + {"Join", Func, 0, "func(s [][]byte, sep []byte) []byte"}, + {"LastIndex", Func, 0, "func(s []byte, sep []byte) int"}, + {"LastIndexAny", Func, 0, "func(s []byte, chars string) int"}, + {"LastIndexByte", Func, 5, "func(s []byte, c byte) int"}, + {"LastIndexFunc", Func, 0, "func(s []byte, f func(r rune) bool) int"}, + {"Lines", Func, 24, "func(s []byte) iter.Seq[[]byte]"}, + {"Map", Func, 0, "func(mapping func(r rune) rune, s []byte) []byte"}, + {"MinRead", Const, 0, ""}, + {"NewBuffer", Func, 0, "func(buf []byte) *Buffer"}, + {"NewBufferString", Func, 0, "func(s string) *Buffer"}, + {"NewReader", Func, 0, "func(b []byte) *Reader"}, + {"Reader", Type, 0, ""}, + {"Repeat", Func, 0, "func(b []byte, count int) []byte"}, + {"Replace", Func, 0, "func(s []byte, old []byte, new []byte, n int) []byte"}, + {"ReplaceAll", Func, 12, "func(s []byte, old []byte, new []byte) []byte"}, + {"Runes", Func, 0, "func(s []byte) []rune"}, + {"Split", Func, 0, "func(s []byte, sep []byte) [][]byte"}, + {"SplitAfter", Func, 0, "func(s []byte, sep []byte) [][]byte"}, + {"SplitAfterN", Func, 0, "func(s []byte, sep []byte, n int) [][]byte"}, + {"SplitAfterSeq", Func, 24, "func(s []byte, sep []byte) iter.Seq[[]byte]"}, + {"SplitN", Func, 0, "func(s []byte, sep []byte, n int) [][]byte"}, + {"SplitSeq", Func, 24, "func(s []byte, sep []byte) iter.Seq[[]byte]"}, + {"Title", Func, 0, "func(s []byte) []byte"}, + {"ToLower", Func, 0, "func(s []byte) []byte"}, + {"ToLowerSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"}, + {"ToTitle", Func, 0, "func(s []byte) []byte"}, + {"ToTitleSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"}, + {"ToUpper", Func, 0, "func(s []byte) []byte"}, + {"ToUpperSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"}, + {"ToValidUTF8", Func, 13, "func(s []byte, replacement []byte) []byte"}, + {"Trim", Func, 0, "func(s []byte, cutset string) []byte"}, + {"TrimFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"}, + {"TrimLeft", Func, 0, "func(s []byte, cutset string) []byte"}, + {"TrimLeftFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"}, + {"TrimPrefix", Func, 1, "func(s []byte, prefix []byte) []byte"}, + {"TrimRight", Func, 0, "func(s []byte, cutset string) []byte"}, + {"TrimRightFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"}, + {"TrimSpace", Func, 0, "func(s []byte) []byte"}, + {"TrimSuffix", Func, 1, "func(s []byte, suffix []byte) []byte"}, + }, + "cmp": { + {"Compare", Func, 21, "func[T Ordered](x T, y T) int"}, + {"Less", Func, 21, "func[T Ordered](x T, y T) bool"}, + {"Or", Func, 22, "func[T comparable](vals ...T) T"}, + {"Ordered", Type, 21, ""}, + }, + 
"compress/bzip2": { + {"(StructuralError).Error", Method, 0, ""}, + {"NewReader", Func, 0, "func(r io.Reader) io.Reader"}, + {"StructuralError", Type, 0, ""}, + }, + "compress/flate": { + {"(*ReadError).Error", Method, 0, ""}, + {"(*WriteError).Error", Method, 0, ""}, + {"(*Writer).Close", Method, 0, ""}, + {"(*Writer).Flush", Method, 0, ""}, + {"(*Writer).Reset", Method, 2, ""}, + {"(*Writer).Write", Method, 0, ""}, + {"(CorruptInputError).Error", Method, 0, ""}, + {"(InternalError).Error", Method, 0, ""}, + {"BestCompression", Const, 0, ""}, + {"BestSpeed", Const, 0, ""}, + {"CorruptInputError", Type, 0, ""}, + {"DefaultCompression", Const, 0, ""}, + {"HuffmanOnly", Const, 7, ""}, + {"InternalError", Type, 0, ""}, + {"NewReader", Func, 0, "func(r io.Reader) io.ReadCloser"}, + {"NewReaderDict", Func, 0, "func(r io.Reader, dict []byte) io.ReadCloser"}, + {"NewWriter", Func, 0, "func(w io.Writer, level int) (*Writer, error)"}, + {"NewWriterDict", Func, 0, "func(w io.Writer, level int, dict []byte) (*Writer, error)"}, + {"NoCompression", Const, 0, ""}, + {"ReadError", Type, 0, ""}, + {"ReadError.Err", Field, 0, ""}, + {"ReadError.Offset", Field, 0, ""}, + {"Reader", Type, 0, ""}, + {"Resetter", Type, 4, ""}, + {"WriteError", Type, 0, ""}, + {"WriteError.Err", Field, 0, ""}, + {"WriteError.Offset", Field, 0, ""}, + {"Writer", Type, 0, ""}, + }, + "compress/gzip": { + {"(*Reader).Close", Method, 0, ""}, + {"(*Reader).Multistream", Method, 4, ""}, + {"(*Reader).Read", Method, 0, ""}, + {"(*Reader).Reset", Method, 3, ""}, + {"(*Writer).Close", Method, 0, ""}, + {"(*Writer).Flush", Method, 1, ""}, + {"(*Writer).Reset", Method, 2, ""}, + {"(*Writer).Write", Method, 0, ""}, + {"BestCompression", Const, 0, ""}, + {"BestSpeed", Const, 0, ""}, + {"DefaultCompression", Const, 0, ""}, + {"ErrChecksum", Var, 0, ""}, + {"ErrHeader", Var, 0, ""}, + {"Header", Type, 0, ""}, + {"Header.Comment", Field, 0, ""}, + {"Header.Extra", Field, 0, ""}, + {"Header.ModTime", Field, 0, ""}, + {"Header.Name", Field, 0, ""}, + {"Header.OS", Field, 0, ""}, + {"HuffmanOnly", Const, 8, ""}, + {"NewReader", Func, 0, "func(r io.Reader) (*Reader, error)"}, + {"NewWriter", Func, 0, "func(w io.Writer) *Writer"}, + {"NewWriterLevel", Func, 0, "func(w io.Writer, level int) (*Writer, error)"}, + {"NoCompression", Const, 0, ""}, + {"Reader", Type, 0, ""}, + {"Reader.Header", Field, 0, ""}, + {"Writer", Type, 0, ""}, + {"Writer.Header", Field, 0, ""}, + }, + "compress/lzw": { + {"(*Reader).Close", Method, 17, ""}, + {"(*Reader).Read", Method, 17, ""}, + {"(*Reader).Reset", Method, 17, ""}, + {"(*Writer).Close", Method, 17, ""}, + {"(*Writer).Reset", Method, 17, ""}, + {"(*Writer).Write", Method, 17, ""}, + {"LSB", Const, 0, ""}, + {"MSB", Const, 0, ""}, + {"NewReader", Func, 0, "func(r io.Reader, order Order, litWidth int) io.ReadCloser"}, + {"NewWriter", Func, 0, "func(w io.Writer, order Order, litWidth int) io.WriteCloser"}, + {"Order", Type, 0, ""}, + {"Reader", Type, 17, ""}, + {"Writer", Type, 17, ""}, + }, + "compress/zlib": { + {"(*Writer).Close", Method, 0, ""}, + {"(*Writer).Flush", Method, 0, ""}, + {"(*Writer).Reset", Method, 2, ""}, + {"(*Writer).Write", Method, 0, ""}, + {"BestCompression", Const, 0, ""}, + {"BestSpeed", Const, 0, ""}, + {"DefaultCompression", Const, 0, ""}, + {"ErrChecksum", Var, 0, ""}, + {"ErrDictionary", Var, 0, ""}, + {"ErrHeader", Var, 0, ""}, + {"HuffmanOnly", Const, 8, ""}, + {"NewReader", Func, 0, "func(r io.Reader) (io.ReadCloser, error)"}, + {"NewReaderDict", Func, 0, "func(r io.Reader, dict 
[]byte) (io.ReadCloser, error)"}, + {"NewWriter", Func, 0, "func(w io.Writer) *Writer"}, + {"NewWriterLevel", Func, 0, "func(w io.Writer, level int) (*Writer, error)"}, + {"NewWriterLevelDict", Func, 0, "func(w io.Writer, level int, dict []byte) (*Writer, error)"}, + {"NoCompression", Const, 0, ""}, + {"Resetter", Type, 4, ""}, + {"Writer", Type, 0, ""}, + }, + "container/heap": { + {"Fix", Func, 2, "func(h Interface, i int)"}, + {"Init", Func, 0, "func(h Interface)"}, + {"Interface", Type, 0, ""}, + {"Pop", Func, 0, "func(h Interface) any"}, + {"Push", Func, 0, "func(h Interface, x any)"}, + {"Remove", Func, 0, "func(h Interface, i int) any"}, + }, + "container/list": { + {"(*Element).Next", Method, 0, ""}, + {"(*Element).Prev", Method, 0, ""}, + {"(*List).Back", Method, 0, ""}, + {"(*List).Front", Method, 0, ""}, + {"(*List).Init", Method, 0, ""}, + {"(*List).InsertAfter", Method, 0, ""}, + {"(*List).InsertBefore", Method, 0, ""}, + {"(*List).Len", Method, 0, ""}, + {"(*List).MoveAfter", Method, 2, ""}, + {"(*List).MoveBefore", Method, 2, ""}, + {"(*List).MoveToBack", Method, 0, ""}, + {"(*List).MoveToFront", Method, 0, ""}, + {"(*List).PushBack", Method, 0, ""}, + {"(*List).PushBackList", Method, 0, ""}, + {"(*List).PushFront", Method, 0, ""}, + {"(*List).PushFrontList", Method, 0, ""}, + {"(*List).Remove", Method, 0, ""}, + {"Element", Type, 0, ""}, + {"Element.Value", Field, 0, ""}, + {"List", Type, 0, ""}, + {"New", Func, 0, "func() *List"}, + }, + "container/ring": { + {"(*Ring).Do", Method, 0, ""}, + {"(*Ring).Len", Method, 0, ""}, + {"(*Ring).Link", Method, 0, ""}, + {"(*Ring).Move", Method, 0, ""}, + {"(*Ring).Next", Method, 0, ""}, + {"(*Ring).Prev", Method, 0, ""}, + {"(*Ring).Unlink", Method, 0, ""}, + {"New", Func, 0, "func(n int) *Ring"}, + {"Ring", Type, 0, ""}, + {"Ring.Value", Field, 0, ""}, + }, + "context": { + {"AfterFunc", Func, 21, "func(ctx Context, f func()) (stop func() bool)"}, + {"Background", Func, 7, "func() Context"}, + {"CancelCauseFunc", Type, 20, ""}, + {"CancelFunc", Type, 7, ""}, + {"Canceled", Var, 7, ""}, + {"Cause", Func, 20, "func(c Context) error"}, + {"Context", Type, 7, ""}, + {"DeadlineExceeded", Var, 7, ""}, + {"TODO", Func, 7, "func() Context"}, + {"WithCancel", Func, 7, "func(parent Context) (ctx Context, cancel CancelFunc)"}, + {"WithCancelCause", Func, 20, "func(parent Context) (ctx Context, cancel CancelCauseFunc)"}, + {"WithDeadline", Func, 7, "func(parent Context, d time.Time) (Context, CancelFunc)"}, + {"WithDeadlineCause", Func, 21, "func(parent Context, d time.Time, cause error) (Context, CancelFunc)"}, + {"WithTimeout", Func, 7, "func(parent Context, timeout time.Duration) (Context, CancelFunc)"}, + {"WithTimeoutCause", Func, 21, "func(parent Context, timeout time.Duration, cause error) (Context, CancelFunc)"}, + {"WithValue", Func, 7, "func(parent Context, key any, val any) Context"}, + {"WithoutCancel", Func, 21, "func(parent Context) Context"}, + }, + "crypto": { + {"(Hash).Available", Method, 0, ""}, + {"(Hash).HashFunc", Method, 4, ""}, + {"(Hash).New", Method, 0, ""}, + {"(Hash).Size", Method, 0, ""}, + {"(Hash).String", Method, 15, ""}, + {"BLAKE2b_256", Const, 9, ""}, + {"BLAKE2b_384", Const, 9, ""}, + {"BLAKE2b_512", Const, 9, ""}, + {"BLAKE2s_256", Const, 9, ""}, + {"Decrypter", Type, 5, ""}, + {"DecrypterOpts", Type, 5, ""}, + {"Hash", Type, 0, ""}, + {"MD4", Const, 0, ""}, + {"MD5", Const, 0, ""}, + {"MD5SHA1", Const, 0, ""}, + {"PrivateKey", Type, 0, ""}, + {"PublicKey", Type, 2, ""}, + {"RIPEMD160", Const, 0, ""}, + 
{"RegisterHash", Func, 0, "func(h Hash, f func() hash.Hash)"}, + {"SHA1", Const, 0, ""}, + {"SHA224", Const, 0, ""}, + {"SHA256", Const, 0, ""}, + {"SHA384", Const, 0, ""}, + {"SHA3_224", Const, 4, ""}, + {"SHA3_256", Const, 4, ""}, + {"SHA3_384", Const, 4, ""}, + {"SHA3_512", Const, 4, ""}, + {"SHA512", Const, 0, ""}, + {"SHA512_224", Const, 5, ""}, + {"SHA512_256", Const, 5, ""}, + {"Signer", Type, 4, ""}, + {"SignerOpts", Type, 4, ""}, + }, + "crypto/aes": { + {"(KeySizeError).Error", Method, 0, ""}, + {"BlockSize", Const, 0, ""}, + {"KeySizeError", Type, 0, ""}, + {"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"}, + }, + "crypto/cipher": { + {"(StreamReader).Read", Method, 0, ""}, + {"(StreamWriter).Close", Method, 0, ""}, + {"(StreamWriter).Write", Method, 0, ""}, + {"AEAD", Type, 2, ""}, + {"Block", Type, 0, ""}, + {"BlockMode", Type, 0, ""}, + {"NewCBCDecrypter", Func, 0, "func(b Block, iv []byte) BlockMode"}, + {"NewCBCEncrypter", Func, 0, "func(b Block, iv []byte) BlockMode"}, + {"NewCFBDecrypter", Func, 0, "func(block Block, iv []byte) Stream"}, + {"NewCFBEncrypter", Func, 0, "func(block Block, iv []byte) Stream"}, + {"NewCTR", Func, 0, "func(block Block, iv []byte) Stream"}, + {"NewGCM", Func, 2, "func(cipher Block) (AEAD, error)"}, + {"NewGCMWithNonceSize", Func, 5, "func(cipher Block, size int) (AEAD, error)"}, + {"NewGCMWithRandomNonce", Func, 24, "func(cipher Block) (AEAD, error)"}, + {"NewGCMWithTagSize", Func, 11, "func(cipher Block, tagSize int) (AEAD, error)"}, + {"NewOFB", Func, 0, "func(b Block, iv []byte) Stream"}, + {"Stream", Type, 0, ""}, + {"StreamReader", Type, 0, ""}, + {"StreamReader.R", Field, 0, ""}, + {"StreamReader.S", Field, 0, ""}, + {"StreamWriter", Type, 0, ""}, + {"StreamWriter.Err", Field, 0, ""}, + {"StreamWriter.S", Field, 0, ""}, + {"StreamWriter.W", Field, 0, ""}, + }, + "crypto/des": { + {"(KeySizeError).Error", Method, 0, ""}, + {"BlockSize", Const, 0, ""}, + {"KeySizeError", Type, 0, ""}, + {"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"}, + {"NewTripleDESCipher", Func, 0, "func(key []byte) (cipher.Block, error)"}, + }, + "crypto/dsa": { + {"ErrInvalidPublicKey", Var, 0, ""}, + {"GenerateKey", Func, 0, "func(priv *PrivateKey, rand io.Reader) error"}, + {"GenerateParameters", Func, 0, "func(params *Parameters, rand io.Reader, sizes ParameterSizes) error"}, + {"L1024N160", Const, 0, ""}, + {"L2048N224", Const, 0, ""}, + {"L2048N256", Const, 0, ""}, + {"L3072N256", Const, 0, ""}, + {"ParameterSizes", Type, 0, ""}, + {"Parameters", Type, 0, ""}, + {"Parameters.G", Field, 0, ""}, + {"Parameters.P", Field, 0, ""}, + {"Parameters.Q", Field, 0, ""}, + {"PrivateKey", Type, 0, ""}, + {"PrivateKey.PublicKey", Field, 0, ""}, + {"PrivateKey.X", Field, 0, ""}, + {"PublicKey", Type, 0, ""}, + {"PublicKey.Parameters", Field, 0, ""}, + {"PublicKey.Y", Field, 0, ""}, + {"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"}, + {"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"}, + }, + "crypto/ecdh": { + {"(*PrivateKey).Bytes", Method, 20, ""}, + {"(*PrivateKey).Curve", Method, 20, ""}, + {"(*PrivateKey).ECDH", Method, 20, ""}, + {"(*PrivateKey).Equal", Method, 20, ""}, + {"(*PrivateKey).Public", Method, 20, ""}, + {"(*PrivateKey).PublicKey", Method, 20, ""}, + {"(*PublicKey).Bytes", Method, 20, ""}, + {"(*PublicKey).Curve", Method, 20, ""}, + {"(*PublicKey).Equal", Method, 20, ""}, + {"Curve", Type, 20, ""}, + {"P256", Func, 20, "func() Curve"}, 
+ {"P384", Func, 20, "func() Curve"}, + {"P521", Func, 20, "func() Curve"}, + {"PrivateKey", Type, 20, ""}, + {"PublicKey", Type, 20, ""}, + {"X25519", Func, 20, "func() Curve"}, + }, + "crypto/ecdsa": { + {"(*PrivateKey).ECDH", Method, 20, ""}, + {"(*PrivateKey).Equal", Method, 15, ""}, + {"(*PrivateKey).Public", Method, 4, ""}, + {"(*PrivateKey).Sign", Method, 4, ""}, + {"(*PublicKey).ECDH", Method, 20, ""}, + {"(*PublicKey).Equal", Method, 15, ""}, + {"(PrivateKey).Add", Method, 0, ""}, + {"(PrivateKey).Double", Method, 0, ""}, + {"(PrivateKey).IsOnCurve", Method, 0, ""}, + {"(PrivateKey).Params", Method, 0, ""}, + {"(PrivateKey).ScalarBaseMult", Method, 0, ""}, + {"(PrivateKey).ScalarMult", Method, 0, ""}, + {"(PublicKey).Add", Method, 0, ""}, + {"(PublicKey).Double", Method, 0, ""}, + {"(PublicKey).IsOnCurve", Method, 0, ""}, + {"(PublicKey).Params", Method, 0, ""}, + {"(PublicKey).ScalarBaseMult", Method, 0, ""}, + {"(PublicKey).ScalarMult", Method, 0, ""}, + {"GenerateKey", Func, 0, "func(c elliptic.Curve, rand io.Reader) (*PrivateKey, error)"}, + {"PrivateKey", Type, 0, ""}, + {"PrivateKey.D", Field, 0, ""}, + {"PrivateKey.PublicKey", Field, 0, ""}, + {"PublicKey", Type, 0, ""}, + {"PublicKey.Curve", Field, 0, ""}, + {"PublicKey.X", Field, 0, ""}, + {"PublicKey.Y", Field, 0, ""}, + {"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"}, + {"SignASN1", Func, 15, "func(rand io.Reader, priv *PrivateKey, hash []byte) ([]byte, error)"}, + {"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"}, + {"VerifyASN1", Func, 15, "func(pub *PublicKey, hash []byte, sig []byte) bool"}, + }, + "crypto/ed25519": { + {"(*Options).HashFunc", Method, 20, ""}, + {"(PrivateKey).Equal", Method, 15, ""}, + {"(PrivateKey).Public", Method, 13, ""}, + {"(PrivateKey).Seed", Method, 13, ""}, + {"(PrivateKey).Sign", Method, 13, ""}, + {"(PublicKey).Equal", Method, 15, ""}, + {"GenerateKey", Func, 13, "func(rand io.Reader) (PublicKey, PrivateKey, error)"}, + {"NewKeyFromSeed", Func, 13, "func(seed []byte) PrivateKey"}, + {"Options", Type, 20, ""}, + {"Options.Context", Field, 20, ""}, + {"Options.Hash", Field, 20, ""}, + {"PrivateKey", Type, 13, ""}, + {"PrivateKeySize", Const, 13, ""}, + {"PublicKey", Type, 13, ""}, + {"PublicKeySize", Const, 13, ""}, + {"SeedSize", Const, 13, ""}, + {"Sign", Func, 13, "func(privateKey PrivateKey, message []byte) []byte"}, + {"SignatureSize", Const, 13, ""}, + {"Verify", Func, 13, "func(publicKey PublicKey, message []byte, sig []byte) bool"}, + {"VerifyWithOptions", Func, 20, "func(publicKey PublicKey, message []byte, sig []byte, opts *Options) error"}, + }, + "crypto/elliptic": { + {"(*CurveParams).Add", Method, 0, ""}, + {"(*CurveParams).Double", Method, 0, ""}, + {"(*CurveParams).IsOnCurve", Method, 0, ""}, + {"(*CurveParams).Params", Method, 0, ""}, + {"(*CurveParams).ScalarBaseMult", Method, 0, ""}, + {"(*CurveParams).ScalarMult", Method, 0, ""}, + {"Curve", Type, 0, ""}, + {"CurveParams", Type, 0, ""}, + {"CurveParams.B", Field, 0, ""}, + {"CurveParams.BitSize", Field, 0, ""}, + {"CurveParams.Gx", Field, 0, ""}, + {"CurveParams.Gy", Field, 0, ""}, + {"CurveParams.N", Field, 0, ""}, + {"CurveParams.Name", Field, 5, ""}, + {"CurveParams.P", Field, 0, ""}, + {"GenerateKey", Func, 0, "func(curve Curve, rand io.Reader) (priv []byte, x *big.Int, y *big.Int, err error)"}, + {"Marshal", Func, 0, "func(curve Curve, x *big.Int, y *big.Int) []byte"}, + {"MarshalCompressed", Func, 15, "func(curve 
Curve, x *big.Int, y *big.Int) []byte"}, + {"P224", Func, 0, "func() Curve"}, + {"P256", Func, 0, "func() Curve"}, + {"P384", Func, 0, "func() Curve"}, + {"P521", Func, 0, "func() Curve"}, + {"Unmarshal", Func, 0, "func(curve Curve, data []byte) (x *big.Int, y *big.Int)"}, + {"UnmarshalCompressed", Func, 15, "func(curve Curve, data []byte) (x *big.Int, y *big.Int)"}, + }, + "crypto/fips140": { + {"Enabled", Func, 24, "func() bool"}, + }, + "crypto/hkdf": { + {"Expand", Func, 24, "func[H hash.Hash](h func() H, pseudorandomKey []byte, info string, keyLength int) ([]byte, error)"}, + {"Extract", Func, 24, "func[H hash.Hash](h func() H, secret []byte, salt []byte) ([]byte, error)"}, + {"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, secret []byte, salt []byte, info string, keyLength int) ([]byte, error)"}, + }, + "crypto/hmac": { + {"Equal", Func, 1, "func(mac1 []byte, mac2 []byte) bool"}, + {"New", Func, 0, "func(h func() hash.Hash, key []byte) hash.Hash"}, + }, + "crypto/md5": { + {"BlockSize", Const, 0, ""}, + {"New", Func, 0, "func() hash.Hash"}, + {"Size", Const, 0, ""}, + {"Sum", Func, 2, "func(data []byte) [16]byte"}, + }, + "crypto/mlkem": { + {"(*DecapsulationKey1024).Bytes", Method, 24, ""}, + {"(*DecapsulationKey1024).Decapsulate", Method, 24, ""}, + {"(*DecapsulationKey1024).EncapsulationKey", Method, 24, ""}, + {"(*DecapsulationKey768).Bytes", Method, 24, ""}, + {"(*DecapsulationKey768).Decapsulate", Method, 24, ""}, + {"(*DecapsulationKey768).EncapsulationKey", Method, 24, ""}, + {"(*EncapsulationKey1024).Bytes", Method, 24, ""}, + {"(*EncapsulationKey1024).Encapsulate", Method, 24, ""}, + {"(*EncapsulationKey768).Bytes", Method, 24, ""}, + {"(*EncapsulationKey768).Encapsulate", Method, 24, ""}, + {"CiphertextSize1024", Const, 24, ""}, + {"CiphertextSize768", Const, 24, ""}, + {"DecapsulationKey1024", Type, 24, ""}, + {"DecapsulationKey768", Type, 24, ""}, + {"EncapsulationKey1024", Type, 24, ""}, + {"EncapsulationKey768", Type, 24, ""}, + {"EncapsulationKeySize1024", Const, 24, ""}, + {"EncapsulationKeySize768", Const, 24, ""}, + {"GenerateKey1024", Func, 24, "func() (*DecapsulationKey1024, error)"}, + {"GenerateKey768", Func, 24, "func() (*DecapsulationKey768, error)"}, + {"NewDecapsulationKey1024", Func, 24, "func(seed []byte) (*DecapsulationKey1024, error)"}, + {"NewDecapsulationKey768", Func, 24, "func(seed []byte) (*DecapsulationKey768, error)"}, + {"NewEncapsulationKey1024", Func, 24, "func(encapsulationKey []byte) (*EncapsulationKey1024, error)"}, + {"NewEncapsulationKey768", Func, 24, "func(encapsulationKey []byte) (*EncapsulationKey768, error)"}, + {"SeedSize", Const, 24, ""}, + {"SharedKeySize", Const, 24, ""}, + }, + "crypto/pbkdf2": { + {"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, password string, salt []byte, iter int, keyLength int) ([]byte, error)"}, + }, + "crypto/rand": { + {"Int", Func, 0, "func(rand io.Reader, max *big.Int) (n *big.Int, err error)"}, + {"Prime", Func, 0, "func(rand io.Reader, bits int) (*big.Int, error)"}, + {"Read", Func, 0, "func(b []byte) (n int, err error)"}, + {"Reader", Var, 0, ""}, + {"Text", Func, 24, "func() string"}, + }, + "crypto/rc4": { + {"(*Cipher).Reset", Method, 0, ""}, + {"(*Cipher).XORKeyStream", Method, 0, ""}, + {"(KeySizeError).Error", Method, 0, ""}, + {"Cipher", Type, 0, ""}, + {"KeySizeError", Type, 0, ""}, + {"NewCipher", Func, 0, "func(key []byte) (*Cipher, error)"}, + }, + "crypto/rsa": { + {"(*PSSOptions).HashFunc", Method, 4, ""}, + {"(*PrivateKey).Decrypt", Method, 5, ""}, + 
{"(*PrivateKey).Equal", Method, 15, ""}, + {"(*PrivateKey).Precompute", Method, 0, ""}, + {"(*PrivateKey).Public", Method, 4, ""}, + {"(*PrivateKey).Sign", Method, 4, ""}, + {"(*PrivateKey).Size", Method, 11, ""}, + {"(*PrivateKey).Validate", Method, 0, ""}, + {"(*PublicKey).Equal", Method, 15, ""}, + {"(*PublicKey).Size", Method, 11, ""}, + {"CRTValue", Type, 0, ""}, + {"CRTValue.Coeff", Field, 0, ""}, + {"CRTValue.Exp", Field, 0, ""}, + {"CRTValue.R", Field, 0, ""}, + {"DecryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) ([]byte, error)"}, + {"DecryptPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error)"}, + {"DecryptPKCS1v15SessionKey", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error"}, + {"EncryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error)"}, + {"EncryptPKCS1v15", Func, 0, "func(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error)"}, + {"ErrDecryption", Var, 0, ""}, + {"ErrMessageTooLong", Var, 0, ""}, + {"ErrVerification", Var, 0, ""}, + {"GenerateKey", Func, 0, "func(random io.Reader, bits int) (*PrivateKey, error)"}, + {"GenerateMultiPrimeKey", Func, 0, "func(random io.Reader, nprimes int, bits int) (*PrivateKey, error)"}, + {"OAEPOptions", Type, 5, ""}, + {"OAEPOptions.Hash", Field, 5, ""}, + {"OAEPOptions.Label", Field, 5, ""}, + {"OAEPOptions.MGFHash", Field, 20, ""}, + {"PKCS1v15DecryptOptions", Type, 5, ""}, + {"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5, ""}, + {"PSSOptions", Type, 2, ""}, + {"PSSOptions.Hash", Field, 4, ""}, + {"PSSOptions.SaltLength", Field, 2, ""}, + {"PSSSaltLengthAuto", Const, 2, ""}, + {"PSSSaltLengthEqualsHash", Const, 2, ""}, + {"PrecomputedValues", Type, 0, ""}, + {"PrecomputedValues.CRTValues", Field, 0, ""}, + {"PrecomputedValues.Dp", Field, 0, ""}, + {"PrecomputedValues.Dq", Field, 0, ""}, + {"PrecomputedValues.Qinv", Field, 0, ""}, + {"PrivateKey", Type, 0, ""}, + {"PrivateKey.D", Field, 0, ""}, + {"PrivateKey.Precomputed", Field, 0, ""}, + {"PrivateKey.Primes", Field, 0, ""}, + {"PrivateKey.PublicKey", Field, 0, ""}, + {"PublicKey", Type, 0, ""}, + {"PublicKey.E", Field, 0, ""}, + {"PublicKey.N", Field, 0, ""}, + {"SignPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)"}, + {"SignPSS", Func, 2, "func(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error)"}, + {"VerifyPKCS1v15", Func, 0, "func(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) error"}, + {"VerifyPSS", Func, 2, "func(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error"}, + }, + "crypto/sha1": { + {"BlockSize", Const, 0, ""}, + {"New", Func, 0, "func() hash.Hash"}, + {"Size", Const, 0, ""}, + {"Sum", Func, 2, "func(data []byte) [20]byte"}, + }, + "crypto/sha256": { + {"BlockSize", Const, 0, ""}, + {"New", Func, 0, "func() hash.Hash"}, + {"New224", Func, 0, "func() hash.Hash"}, + {"Size", Const, 0, ""}, + {"Size224", Const, 0, ""}, + {"Sum224", Func, 2, "func(data []byte) [28]byte"}, + {"Sum256", Func, 2, "func(data []byte) [32]byte"}, + }, + "crypto/sha3": { + {"(*SHA3).AppendBinary", Method, 24, ""}, + {"(*SHA3).BlockSize", Method, 24, ""}, + {"(*SHA3).MarshalBinary", Method, 24, ""}, + {"(*SHA3).Reset", Method, 24, ""}, + {"(*SHA3).Size", Method, 24, ""}, + {"(*SHA3).Sum", Method, 24, ""}, + 
{"(*SHA3).UnmarshalBinary", Method, 24, ""}, + {"(*SHA3).Write", Method, 24, ""}, + {"(*SHAKE).AppendBinary", Method, 24, ""}, + {"(*SHAKE).BlockSize", Method, 24, ""}, + {"(*SHAKE).MarshalBinary", Method, 24, ""}, + {"(*SHAKE).Read", Method, 24, ""}, + {"(*SHAKE).Reset", Method, 24, ""}, + {"(*SHAKE).UnmarshalBinary", Method, 24, ""}, + {"(*SHAKE).Write", Method, 24, ""}, + {"New224", Func, 24, "func() *SHA3"}, + {"New256", Func, 24, "func() *SHA3"}, + {"New384", Func, 24, "func() *SHA3"}, + {"New512", Func, 24, "func() *SHA3"}, + {"NewCSHAKE128", Func, 24, "func(N []byte, S []byte) *SHAKE"}, + {"NewCSHAKE256", Func, 24, "func(N []byte, S []byte) *SHAKE"}, + {"NewSHAKE128", Func, 24, "func() *SHAKE"}, + {"NewSHAKE256", Func, 24, "func() *SHAKE"}, + {"SHA3", Type, 24, ""}, + {"SHAKE", Type, 24, ""}, + {"Sum224", Func, 24, "func(data []byte) [28]byte"}, + {"Sum256", Func, 24, "func(data []byte) [32]byte"}, + {"Sum384", Func, 24, "func(data []byte) [48]byte"}, + {"Sum512", Func, 24, "func(data []byte) [64]byte"}, + {"SumSHAKE128", Func, 24, "func(data []byte, length int) []byte"}, + {"SumSHAKE256", Func, 24, "func(data []byte, length int) []byte"}, + }, + "crypto/sha512": { + {"BlockSize", Const, 0, ""}, + {"New", Func, 0, "func() hash.Hash"}, + {"New384", Func, 0, "func() hash.Hash"}, + {"New512_224", Func, 5, "func() hash.Hash"}, + {"New512_256", Func, 5, "func() hash.Hash"}, + {"Size", Const, 0, ""}, + {"Size224", Const, 5, ""}, + {"Size256", Const, 5, ""}, + {"Size384", Const, 0, ""}, + {"Sum384", Func, 2, "func(data []byte) [48]byte"}, + {"Sum512", Func, 2, "func(data []byte) [64]byte"}, + {"Sum512_224", Func, 5, "func(data []byte) [28]byte"}, + {"Sum512_256", Func, 5, "func(data []byte) [32]byte"}, + }, + "crypto/subtle": { + {"ConstantTimeByteEq", Func, 0, "func(x uint8, y uint8) int"}, + {"ConstantTimeCompare", Func, 0, "func(x []byte, y []byte) int"}, + {"ConstantTimeCopy", Func, 0, "func(v int, x []byte, y []byte)"}, + {"ConstantTimeEq", Func, 0, "func(x int32, y int32) int"}, + {"ConstantTimeLessOrEq", Func, 2, "func(x int, y int) int"}, + {"ConstantTimeSelect", Func, 0, "func(v int, x int, y int) int"}, + {"WithDataIndependentTiming", Func, 24, "func(f func())"}, + {"XORBytes", Func, 20, "func(dst []byte, x []byte, y []byte) int"}, + }, + "crypto/tls": { + {"(*CertificateRequestInfo).Context", Method, 17, ""}, + {"(*CertificateRequestInfo).SupportsCertificate", Method, 14, ""}, + {"(*CertificateVerificationError).Error", Method, 20, ""}, + {"(*CertificateVerificationError).Unwrap", Method, 20, ""}, + {"(*ClientHelloInfo).Context", Method, 17, ""}, + {"(*ClientHelloInfo).SupportsCertificate", Method, 14, ""}, + {"(*ClientSessionState).ResumptionState", Method, 21, ""}, + {"(*Config).BuildNameToCertificate", Method, 0, ""}, + {"(*Config).Clone", Method, 8, ""}, + {"(*Config).DecryptTicket", Method, 21, ""}, + {"(*Config).EncryptTicket", Method, 21, ""}, + {"(*Config).SetSessionTicketKeys", Method, 5, ""}, + {"(*Conn).Close", Method, 0, ""}, + {"(*Conn).CloseWrite", Method, 8, ""}, + {"(*Conn).ConnectionState", Method, 0, ""}, + {"(*Conn).Handshake", Method, 0, ""}, + {"(*Conn).HandshakeContext", Method, 17, ""}, + {"(*Conn).LocalAddr", Method, 0, ""}, + {"(*Conn).NetConn", Method, 18, ""}, + {"(*Conn).OCSPResponse", Method, 0, ""}, + {"(*Conn).Read", Method, 0, ""}, + {"(*Conn).RemoteAddr", Method, 0, ""}, + {"(*Conn).SetDeadline", Method, 0, ""}, + {"(*Conn).SetReadDeadline", Method, 0, ""}, + {"(*Conn).SetWriteDeadline", Method, 0, ""}, + {"(*Conn).VerifyHostname", Method, 0, 
""}, + {"(*Conn).Write", Method, 0, ""}, + {"(*ConnectionState).ExportKeyingMaterial", Method, 11, ""}, + {"(*Dialer).Dial", Method, 15, ""}, + {"(*Dialer).DialContext", Method, 15, ""}, + {"(*ECHRejectionError).Error", Method, 23, ""}, + {"(*QUICConn).Close", Method, 21, ""}, + {"(*QUICConn).ConnectionState", Method, 21, ""}, + {"(*QUICConn).HandleData", Method, 21, ""}, + {"(*QUICConn).NextEvent", Method, 21, ""}, + {"(*QUICConn).SendSessionTicket", Method, 21, ""}, + {"(*QUICConn).SetTransportParameters", Method, 21, ""}, + {"(*QUICConn).Start", Method, 21, ""}, + {"(*QUICConn).StoreSession", Method, 23, ""}, + {"(*SessionState).Bytes", Method, 21, ""}, + {"(AlertError).Error", Method, 21, ""}, + {"(ClientAuthType).String", Method, 15, ""}, + {"(CurveID).String", Method, 15, ""}, + {"(QUICEncryptionLevel).String", Method, 21, ""}, + {"(RecordHeaderError).Error", Method, 6, ""}, + {"(SignatureScheme).String", Method, 15, ""}, + {"AlertError", Type, 21, ""}, + {"Certificate", Type, 0, ""}, + {"Certificate.Certificate", Field, 0, ""}, + {"Certificate.Leaf", Field, 0, ""}, + {"Certificate.OCSPStaple", Field, 0, ""}, + {"Certificate.PrivateKey", Field, 0, ""}, + {"Certificate.SignedCertificateTimestamps", Field, 5, ""}, + {"Certificate.SupportedSignatureAlgorithms", Field, 14, ""}, + {"CertificateRequestInfo", Type, 8, ""}, + {"CertificateRequestInfo.AcceptableCAs", Field, 8, ""}, + {"CertificateRequestInfo.SignatureSchemes", Field, 8, ""}, + {"CertificateRequestInfo.Version", Field, 14, ""}, + {"CertificateVerificationError", Type, 20, ""}, + {"CertificateVerificationError.Err", Field, 20, ""}, + {"CertificateVerificationError.UnverifiedCertificates", Field, 20, ""}, + {"CipherSuite", Type, 14, ""}, + {"CipherSuite.ID", Field, 14, ""}, + {"CipherSuite.Insecure", Field, 14, ""}, + {"CipherSuite.Name", Field, 14, ""}, + {"CipherSuite.SupportedVersions", Field, 14, ""}, + {"CipherSuiteName", Func, 14, "func(id uint16) string"}, + {"CipherSuites", Func, 14, "func() []*CipherSuite"}, + {"Client", Func, 0, "func(conn net.Conn, config *Config) *Conn"}, + {"ClientAuthType", Type, 0, ""}, + {"ClientHelloInfo", Type, 4, ""}, + {"ClientHelloInfo.CipherSuites", Field, 4, ""}, + {"ClientHelloInfo.Conn", Field, 8, ""}, + {"ClientHelloInfo.Extensions", Field, 24, ""}, + {"ClientHelloInfo.ServerName", Field, 4, ""}, + {"ClientHelloInfo.SignatureSchemes", Field, 8, ""}, + {"ClientHelloInfo.SupportedCurves", Field, 4, ""}, + {"ClientHelloInfo.SupportedPoints", Field, 4, ""}, + {"ClientHelloInfo.SupportedProtos", Field, 8, ""}, + {"ClientHelloInfo.SupportedVersions", Field, 8, ""}, + {"ClientSessionCache", Type, 3, ""}, + {"ClientSessionState", Type, 3, ""}, + {"Config", Type, 0, ""}, + {"Config.Certificates", Field, 0, ""}, + {"Config.CipherSuites", Field, 0, ""}, + {"Config.ClientAuth", Field, 0, ""}, + {"Config.ClientCAs", Field, 0, ""}, + {"Config.ClientSessionCache", Field, 3, ""}, + {"Config.CurvePreferences", Field, 3, ""}, + {"Config.DynamicRecordSizingDisabled", Field, 7, ""}, + {"Config.EncryptedClientHelloConfigList", Field, 23, ""}, + {"Config.EncryptedClientHelloKeys", Field, 24, ""}, + {"Config.EncryptedClientHelloRejectionVerify", Field, 23, ""}, + {"Config.GetCertificate", Field, 4, ""}, + {"Config.GetClientCertificate", Field, 8, ""}, + {"Config.GetConfigForClient", Field, 8, ""}, + {"Config.InsecureSkipVerify", Field, 0, ""}, + {"Config.KeyLogWriter", Field, 8, ""}, + {"Config.MaxVersion", Field, 2, ""}, + {"Config.MinVersion", Field, 2, ""}, + {"Config.NameToCertificate", Field, 0, ""}, + 
{"Config.NextProtos", Field, 0, ""}, + {"Config.PreferServerCipherSuites", Field, 1, ""}, + {"Config.Rand", Field, 0, ""}, + {"Config.Renegotiation", Field, 7, ""}, + {"Config.RootCAs", Field, 0, ""}, + {"Config.ServerName", Field, 0, ""}, + {"Config.SessionTicketKey", Field, 1, ""}, + {"Config.SessionTicketsDisabled", Field, 1, ""}, + {"Config.Time", Field, 0, ""}, + {"Config.UnwrapSession", Field, 21, ""}, + {"Config.VerifyConnection", Field, 15, ""}, + {"Config.VerifyPeerCertificate", Field, 8, ""}, + {"Config.WrapSession", Field, 21, ""}, + {"Conn", Type, 0, ""}, + {"ConnectionState", Type, 0, ""}, + {"ConnectionState.CipherSuite", Field, 0, ""}, + {"ConnectionState.CurveID", Field, 25, ""}, + {"ConnectionState.DidResume", Field, 1, ""}, + {"ConnectionState.ECHAccepted", Field, 23, ""}, + {"ConnectionState.HandshakeComplete", Field, 0, ""}, + {"ConnectionState.NegotiatedProtocol", Field, 0, ""}, + {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0, ""}, + {"ConnectionState.OCSPResponse", Field, 5, ""}, + {"ConnectionState.PeerCertificates", Field, 0, ""}, + {"ConnectionState.ServerName", Field, 0, ""}, + {"ConnectionState.SignedCertificateTimestamps", Field, 5, ""}, + {"ConnectionState.TLSUnique", Field, 4, ""}, + {"ConnectionState.VerifiedChains", Field, 0, ""}, + {"ConnectionState.Version", Field, 3, ""}, + {"CurveID", Type, 3, ""}, + {"CurveP256", Const, 3, ""}, + {"CurveP384", Const, 3, ""}, + {"CurveP521", Const, 3, ""}, + {"Dial", Func, 0, "func(network string, addr string, config *Config) (*Conn, error)"}, + {"DialWithDialer", Func, 3, "func(dialer *net.Dialer, network string, addr string, config *Config) (*Conn, error)"}, + {"Dialer", Type, 15, ""}, + {"Dialer.Config", Field, 15, ""}, + {"Dialer.NetDialer", Field, 15, ""}, + {"ECDSAWithP256AndSHA256", Const, 8, ""}, + {"ECDSAWithP384AndSHA384", Const, 8, ""}, + {"ECDSAWithP521AndSHA512", Const, 8, ""}, + {"ECDSAWithSHA1", Const, 10, ""}, + {"ECHRejectionError", Type, 23, ""}, + {"ECHRejectionError.RetryConfigList", Field, 23, ""}, + {"Ed25519", Const, 13, ""}, + {"EncryptedClientHelloKey", Type, 24, ""}, + {"EncryptedClientHelloKey.Config", Field, 24, ""}, + {"EncryptedClientHelloKey.PrivateKey", Field, 24, ""}, + {"EncryptedClientHelloKey.SendAsRetry", Field, 24, ""}, + {"InsecureCipherSuites", Func, 14, "func() []*CipherSuite"}, + {"Listen", Func, 0, "func(network string, laddr string, config *Config) (net.Listener, error)"}, + {"LoadX509KeyPair", Func, 0, "func(certFile string, keyFile string) (Certificate, error)"}, + {"NewLRUClientSessionCache", Func, 3, "func(capacity int) ClientSessionCache"}, + {"NewListener", Func, 0, "func(inner net.Listener, config *Config) net.Listener"}, + {"NewResumptionState", Func, 21, "func(ticket []byte, state *SessionState) (*ClientSessionState, error)"}, + {"NoClientCert", Const, 0, ""}, + {"PKCS1WithSHA1", Const, 8, ""}, + {"PKCS1WithSHA256", Const, 8, ""}, + {"PKCS1WithSHA384", Const, 8, ""}, + {"PKCS1WithSHA512", Const, 8, ""}, + {"PSSWithSHA256", Const, 8, ""}, + {"PSSWithSHA384", Const, 8, ""}, + {"PSSWithSHA512", Const, 8, ""}, + {"ParseSessionState", Func, 21, "func(data []byte) (*SessionState, error)"}, + {"QUICClient", Func, 21, "func(config *QUICConfig) *QUICConn"}, + {"QUICConfig", Type, 21, ""}, + {"QUICConfig.EnableSessionEvents", Field, 23, ""}, + {"QUICConfig.TLSConfig", Field, 21, ""}, + {"QUICConn", Type, 21, ""}, + {"QUICEncryptionLevel", Type, 21, ""}, + {"QUICEncryptionLevelApplication", Const, 21, ""}, + {"QUICEncryptionLevelEarly", Const, 21, ""}, + 
{"QUICEncryptionLevelHandshake", Const, 21, ""}, + {"QUICEncryptionLevelInitial", Const, 21, ""}, + {"QUICEvent", Type, 21, ""}, + {"QUICEvent.Data", Field, 21, ""}, + {"QUICEvent.Kind", Field, 21, ""}, + {"QUICEvent.Level", Field, 21, ""}, + {"QUICEvent.SessionState", Field, 23, ""}, + {"QUICEvent.Suite", Field, 21, ""}, + {"QUICEventKind", Type, 21, ""}, + {"QUICHandshakeDone", Const, 21, ""}, + {"QUICNoEvent", Const, 21, ""}, + {"QUICRejectedEarlyData", Const, 21, ""}, + {"QUICResumeSession", Const, 23, ""}, + {"QUICServer", Func, 21, "func(config *QUICConfig) *QUICConn"}, + {"QUICSessionTicketOptions", Type, 21, ""}, + {"QUICSessionTicketOptions.EarlyData", Field, 21, ""}, + {"QUICSessionTicketOptions.Extra", Field, 23, ""}, + {"QUICSetReadSecret", Const, 21, ""}, + {"QUICSetWriteSecret", Const, 21, ""}, + {"QUICStoreSession", Const, 23, ""}, + {"QUICTransportParameters", Const, 21, ""}, + {"QUICTransportParametersRequired", Const, 21, ""}, + {"QUICWriteData", Const, 21, ""}, + {"RecordHeaderError", Type, 6, ""}, + {"RecordHeaderError.Conn", Field, 12, ""}, + {"RecordHeaderError.Msg", Field, 6, ""}, + {"RecordHeaderError.RecordHeader", Field, 6, ""}, + {"RenegotiateFreelyAsClient", Const, 7, ""}, + {"RenegotiateNever", Const, 7, ""}, + {"RenegotiateOnceAsClient", Const, 7, ""}, + {"RenegotiationSupport", Type, 7, ""}, + {"RequestClientCert", Const, 0, ""}, + {"RequireAndVerifyClientCert", Const, 0, ""}, + {"RequireAnyClientCert", Const, 0, ""}, + {"Server", Func, 0, "func(conn net.Conn, config *Config) *Conn"}, + {"SessionState", Type, 21, ""}, + {"SessionState.EarlyData", Field, 21, ""}, + {"SessionState.Extra", Field, 21, ""}, + {"SignatureScheme", Type, 8, ""}, + {"TLS_AES_128_GCM_SHA256", Const, 12, ""}, + {"TLS_AES_256_GCM_SHA384", Const, 12, ""}, + {"TLS_CHACHA20_POLY1305_SHA256", Const, 12, ""}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2, ""}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8, ""}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2, ""}, + {"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2, ""}, + {"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5, ""}, + {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8, ""}, + {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14, ""}, + {"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2, ""}, + {"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0, ""}, + {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0, ""}, + {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8, ""}, + {"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2, ""}, + {"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1, ""}, + {"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5, ""}, + {"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8, ""}, + {"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14, ""}, + {"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0, ""}, + {"TLS_FALLBACK_SCSV", Const, 4, ""}, + {"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0, ""}, + {"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0, ""}, + {"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8, ""}, + {"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6, ""}, + {"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1, ""}, + {"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6, ""}, + {"TLS_RSA_WITH_RC4_128_SHA", Const, 0, ""}, + {"VerifyClientCertIfGiven", Const, 0, ""}, + {"VersionName", Func, 21, "func(version uint16) string"}, + {"VersionSSL30", Const, 2, ""}, + {"VersionTLS10", Const, 2, ""}, + {"VersionTLS11", Const, 2, ""}, + {"VersionTLS12", Const, 2, ""}, + {"VersionTLS13", Const, 12, ""}, + {"X25519", Const, 8, ""}, + 
{"X25519MLKEM768", Const, 24, ""}, + {"X509KeyPair", Func, 0, "func(certPEMBlock []byte, keyPEMBlock []byte) (Certificate, error)"}, + }, + "crypto/x509": { + {"(*CertPool).AddCert", Method, 0, ""}, + {"(*CertPool).AddCertWithConstraint", Method, 22, ""}, + {"(*CertPool).AppendCertsFromPEM", Method, 0, ""}, + {"(*CertPool).Clone", Method, 19, ""}, + {"(*CertPool).Equal", Method, 19, ""}, + {"(*CertPool).Subjects", Method, 0, ""}, + {"(*Certificate).CheckCRLSignature", Method, 0, ""}, + {"(*Certificate).CheckSignature", Method, 0, ""}, + {"(*Certificate).CheckSignatureFrom", Method, 0, ""}, + {"(*Certificate).CreateCRL", Method, 0, ""}, + {"(*Certificate).Equal", Method, 0, ""}, + {"(*Certificate).Verify", Method, 0, ""}, + {"(*Certificate).VerifyHostname", Method, 0, ""}, + {"(*CertificateRequest).CheckSignature", Method, 5, ""}, + {"(*OID).UnmarshalBinary", Method, 23, ""}, + {"(*OID).UnmarshalText", Method, 23, ""}, + {"(*RevocationList).CheckSignatureFrom", Method, 19, ""}, + {"(CertificateInvalidError).Error", Method, 0, ""}, + {"(ConstraintViolationError).Error", Method, 0, ""}, + {"(HostnameError).Error", Method, 0, ""}, + {"(InsecureAlgorithmError).Error", Method, 6, ""}, + {"(OID).AppendBinary", Method, 24, ""}, + {"(OID).AppendText", Method, 24, ""}, + {"(OID).Equal", Method, 22, ""}, + {"(OID).EqualASN1OID", Method, 22, ""}, + {"(OID).MarshalBinary", Method, 23, ""}, + {"(OID).MarshalText", Method, 23, ""}, + {"(OID).String", Method, 22, ""}, + {"(PublicKeyAlgorithm).String", Method, 10, ""}, + {"(SignatureAlgorithm).String", Method, 6, ""}, + {"(SystemRootsError).Error", Method, 1, ""}, + {"(SystemRootsError).Unwrap", Method, 16, ""}, + {"(UnhandledCriticalExtension).Error", Method, 0, ""}, + {"(UnknownAuthorityError).Error", Method, 0, ""}, + {"CANotAuthorizedForExtKeyUsage", Const, 10, ""}, + {"CANotAuthorizedForThisName", Const, 0, ""}, + {"CertPool", Type, 0, ""}, + {"Certificate", Type, 0, ""}, + {"Certificate.AuthorityKeyId", Field, 0, ""}, + {"Certificate.BasicConstraintsValid", Field, 0, ""}, + {"Certificate.CRLDistributionPoints", Field, 2, ""}, + {"Certificate.DNSNames", Field, 0, ""}, + {"Certificate.EmailAddresses", Field, 0, ""}, + {"Certificate.ExcludedDNSDomains", Field, 9, ""}, + {"Certificate.ExcludedEmailAddresses", Field, 10, ""}, + {"Certificate.ExcludedIPRanges", Field, 10, ""}, + {"Certificate.ExcludedURIDomains", Field, 10, ""}, + {"Certificate.ExtKeyUsage", Field, 0, ""}, + {"Certificate.Extensions", Field, 2, ""}, + {"Certificate.ExtraExtensions", Field, 2, ""}, + {"Certificate.IPAddresses", Field, 1, ""}, + {"Certificate.InhibitAnyPolicy", Field, 24, ""}, + {"Certificate.InhibitAnyPolicyZero", Field, 24, ""}, + {"Certificate.InhibitPolicyMapping", Field, 24, ""}, + {"Certificate.InhibitPolicyMappingZero", Field, 24, ""}, + {"Certificate.IsCA", Field, 0, ""}, + {"Certificate.Issuer", Field, 0, ""}, + {"Certificate.IssuingCertificateURL", Field, 2, ""}, + {"Certificate.KeyUsage", Field, 0, ""}, + {"Certificate.MaxPathLen", Field, 0, ""}, + {"Certificate.MaxPathLenZero", Field, 4, ""}, + {"Certificate.NotAfter", Field, 0, ""}, + {"Certificate.NotBefore", Field, 0, ""}, + {"Certificate.OCSPServer", Field, 2, ""}, + {"Certificate.PermittedDNSDomains", Field, 0, ""}, + {"Certificate.PermittedDNSDomainsCritical", Field, 0, ""}, + {"Certificate.PermittedEmailAddresses", Field, 10, ""}, + {"Certificate.PermittedIPRanges", Field, 10, ""}, + {"Certificate.PermittedURIDomains", Field, 10, ""}, + {"Certificate.Policies", Field, 22, ""}, + 
{"Certificate.PolicyIdentifiers", Field, 0, ""}, + {"Certificate.PolicyMappings", Field, 24, ""}, + {"Certificate.PublicKey", Field, 0, ""}, + {"Certificate.PublicKeyAlgorithm", Field, 0, ""}, + {"Certificate.Raw", Field, 0, ""}, + {"Certificate.RawIssuer", Field, 0, ""}, + {"Certificate.RawSubject", Field, 0, ""}, + {"Certificate.RawSubjectPublicKeyInfo", Field, 0, ""}, + {"Certificate.RawTBSCertificate", Field, 0, ""}, + {"Certificate.RequireExplicitPolicy", Field, 24, ""}, + {"Certificate.RequireExplicitPolicyZero", Field, 24, ""}, + {"Certificate.SerialNumber", Field, 0, ""}, + {"Certificate.Signature", Field, 0, ""}, + {"Certificate.SignatureAlgorithm", Field, 0, ""}, + {"Certificate.Subject", Field, 0, ""}, + {"Certificate.SubjectKeyId", Field, 0, ""}, + {"Certificate.URIs", Field, 10, ""}, + {"Certificate.UnhandledCriticalExtensions", Field, 5, ""}, + {"Certificate.UnknownExtKeyUsage", Field, 0, ""}, + {"Certificate.Version", Field, 0, ""}, + {"CertificateInvalidError", Type, 0, ""}, + {"CertificateInvalidError.Cert", Field, 0, ""}, + {"CertificateInvalidError.Detail", Field, 10, ""}, + {"CertificateInvalidError.Reason", Field, 0, ""}, + {"CertificateRequest", Type, 3, ""}, + {"CertificateRequest.Attributes", Field, 3, ""}, + {"CertificateRequest.DNSNames", Field, 3, ""}, + {"CertificateRequest.EmailAddresses", Field, 3, ""}, + {"CertificateRequest.Extensions", Field, 3, ""}, + {"CertificateRequest.ExtraExtensions", Field, 3, ""}, + {"CertificateRequest.IPAddresses", Field, 3, ""}, + {"CertificateRequest.PublicKey", Field, 3, ""}, + {"CertificateRequest.PublicKeyAlgorithm", Field, 3, ""}, + {"CertificateRequest.Raw", Field, 3, ""}, + {"CertificateRequest.RawSubject", Field, 3, ""}, + {"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3, ""}, + {"CertificateRequest.RawTBSCertificateRequest", Field, 3, ""}, + {"CertificateRequest.Signature", Field, 3, ""}, + {"CertificateRequest.SignatureAlgorithm", Field, 3, ""}, + {"CertificateRequest.Subject", Field, 3, ""}, + {"CertificateRequest.URIs", Field, 10, ""}, + {"CertificateRequest.Version", Field, 3, ""}, + {"ConstraintViolationError", Type, 0, ""}, + {"CreateCertificate", Func, 0, "func(rand io.Reader, template *Certificate, parent *Certificate, pub any, priv any) ([]byte, error)"}, + {"CreateCertificateRequest", Func, 3, "func(rand io.Reader, template *CertificateRequest, priv any) (csr []byte, err error)"}, + {"CreateRevocationList", Func, 15, "func(rand io.Reader, template *RevocationList, issuer *Certificate, priv crypto.Signer) ([]byte, error)"}, + {"DSA", Const, 0, ""}, + {"DSAWithSHA1", Const, 0, ""}, + {"DSAWithSHA256", Const, 0, ""}, + {"DecryptPEMBlock", Func, 1, "func(b *pem.Block, password []byte) ([]byte, error)"}, + {"ECDSA", Const, 1, ""}, + {"ECDSAWithSHA1", Const, 1, ""}, + {"ECDSAWithSHA256", Const, 1, ""}, + {"ECDSAWithSHA384", Const, 1, ""}, + {"ECDSAWithSHA512", Const, 1, ""}, + {"Ed25519", Const, 13, ""}, + {"EncryptPEMBlock", Func, 1, "func(rand io.Reader, blockType string, data []byte, password []byte, alg PEMCipher) (*pem.Block, error)"}, + {"ErrUnsupportedAlgorithm", Var, 0, ""}, + {"Expired", Const, 0, ""}, + {"ExtKeyUsage", Type, 0, ""}, + {"ExtKeyUsageAny", Const, 0, ""}, + {"ExtKeyUsageClientAuth", Const, 0, ""}, + {"ExtKeyUsageCodeSigning", Const, 0, ""}, + {"ExtKeyUsageEmailProtection", Const, 0, ""}, + {"ExtKeyUsageIPSECEndSystem", Const, 1, ""}, + {"ExtKeyUsageIPSECTunnel", Const, 1, ""}, + {"ExtKeyUsageIPSECUser", Const, 1, ""}, + {"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10, ""}, + 
{"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10, ""}, + {"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1, ""}, + {"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1, ""}, + {"ExtKeyUsageOCSPSigning", Const, 0, ""}, + {"ExtKeyUsageServerAuth", Const, 0, ""}, + {"ExtKeyUsageTimeStamping", Const, 0, ""}, + {"HostnameError", Type, 0, ""}, + {"HostnameError.Certificate", Field, 0, ""}, + {"HostnameError.Host", Field, 0, ""}, + {"IncompatibleUsage", Const, 1, ""}, + {"IncorrectPasswordError", Var, 1, ""}, + {"InsecureAlgorithmError", Type, 6, ""}, + {"InvalidReason", Type, 0, ""}, + {"IsEncryptedPEMBlock", Func, 1, "func(b *pem.Block) bool"}, + {"KeyUsage", Type, 0, ""}, + {"KeyUsageCRLSign", Const, 0, ""}, + {"KeyUsageCertSign", Const, 0, ""}, + {"KeyUsageContentCommitment", Const, 0, ""}, + {"KeyUsageDataEncipherment", Const, 0, ""}, + {"KeyUsageDecipherOnly", Const, 0, ""}, + {"KeyUsageDigitalSignature", Const, 0, ""}, + {"KeyUsageEncipherOnly", Const, 0, ""}, + {"KeyUsageKeyAgreement", Const, 0, ""}, + {"KeyUsageKeyEncipherment", Const, 0, ""}, + {"MD2WithRSA", Const, 0, ""}, + {"MD5WithRSA", Const, 0, ""}, + {"MarshalECPrivateKey", Func, 2, "func(key *ecdsa.PrivateKey) ([]byte, error)"}, + {"MarshalPKCS1PrivateKey", Func, 0, "func(key *rsa.PrivateKey) []byte"}, + {"MarshalPKCS1PublicKey", Func, 10, "func(key *rsa.PublicKey) []byte"}, + {"MarshalPKCS8PrivateKey", Func, 10, "func(key any) ([]byte, error)"}, + {"MarshalPKIXPublicKey", Func, 0, "func(pub any) ([]byte, error)"}, + {"NameConstraintsWithoutSANs", Const, 10, ""}, + {"NameMismatch", Const, 8, ""}, + {"NewCertPool", Func, 0, "func() *CertPool"}, + {"NoValidChains", Const, 24, ""}, + {"NotAuthorizedToSign", Const, 0, ""}, + {"OID", Type, 22, ""}, + {"OIDFromInts", Func, 22, "func(oid []uint64) (OID, error)"}, + {"PEMCipher", Type, 1, ""}, + {"PEMCipher3DES", Const, 1, ""}, + {"PEMCipherAES128", Const, 1, ""}, + {"PEMCipherAES192", Const, 1, ""}, + {"PEMCipherAES256", Const, 1, ""}, + {"PEMCipherDES", Const, 1, ""}, + {"ParseCRL", Func, 0, "func(crlBytes []byte) (*pkix.CertificateList, error)"}, + {"ParseCertificate", Func, 0, "func(der []byte) (*Certificate, error)"}, + {"ParseCertificateRequest", Func, 3, "func(asn1Data []byte) (*CertificateRequest, error)"}, + {"ParseCertificates", Func, 0, "func(der []byte) ([]*Certificate, error)"}, + {"ParseDERCRL", Func, 0, "func(derBytes []byte) (*pkix.CertificateList, error)"}, + {"ParseECPrivateKey", Func, 1, "func(der []byte) (*ecdsa.PrivateKey, error)"}, + {"ParseOID", Func, 23, "func(oid string) (OID, error)"}, + {"ParsePKCS1PrivateKey", Func, 0, "func(der []byte) (*rsa.PrivateKey, error)"}, + {"ParsePKCS1PublicKey", Func, 10, "func(der []byte) (*rsa.PublicKey, error)"}, + {"ParsePKCS8PrivateKey", Func, 0, "func(der []byte) (key any, err error)"}, + {"ParsePKIXPublicKey", Func, 0, "func(derBytes []byte) (pub any, err error)"}, + {"ParseRevocationList", Func, 19, "func(der []byte) (*RevocationList, error)"}, + {"PolicyMapping", Type, 24, ""}, + {"PolicyMapping.IssuerDomainPolicy", Field, 24, ""}, + {"PolicyMapping.SubjectDomainPolicy", Field, 24, ""}, + {"PublicKeyAlgorithm", Type, 0, ""}, + {"PureEd25519", Const, 13, ""}, + {"RSA", Const, 0, ""}, + {"RevocationList", Type, 15, ""}, + {"RevocationList.AuthorityKeyId", Field, 19, ""}, + {"RevocationList.Extensions", Field, 19, ""}, + {"RevocationList.ExtraExtensions", Field, 15, ""}, + {"RevocationList.Issuer", Field, 19, ""}, + {"RevocationList.NextUpdate", Field, 15, ""}, + {"RevocationList.Number", Field, 15, ""}, + 
{"RevocationList.Raw", Field, 19, ""}, + {"RevocationList.RawIssuer", Field, 19, ""}, + {"RevocationList.RawTBSRevocationList", Field, 19, ""}, + {"RevocationList.RevokedCertificateEntries", Field, 21, ""}, + {"RevocationList.RevokedCertificates", Field, 15, ""}, + {"RevocationList.Signature", Field, 19, ""}, + {"RevocationList.SignatureAlgorithm", Field, 15, ""}, + {"RevocationList.ThisUpdate", Field, 15, ""}, + {"RevocationListEntry", Type, 21, ""}, + {"RevocationListEntry.Extensions", Field, 21, ""}, + {"RevocationListEntry.ExtraExtensions", Field, 21, ""}, + {"RevocationListEntry.Raw", Field, 21, ""}, + {"RevocationListEntry.ReasonCode", Field, 21, ""}, + {"RevocationListEntry.RevocationTime", Field, 21, ""}, + {"RevocationListEntry.SerialNumber", Field, 21, ""}, + {"SHA1WithRSA", Const, 0, ""}, + {"SHA256WithRSA", Const, 0, ""}, + {"SHA256WithRSAPSS", Const, 8, ""}, + {"SHA384WithRSA", Const, 0, ""}, + {"SHA384WithRSAPSS", Const, 8, ""}, + {"SHA512WithRSA", Const, 0, ""}, + {"SHA512WithRSAPSS", Const, 8, ""}, + {"SetFallbackRoots", Func, 20, "func(roots *CertPool)"}, + {"SignatureAlgorithm", Type, 0, ""}, + {"SystemCertPool", Func, 7, "func() (*CertPool, error)"}, + {"SystemRootsError", Type, 1, ""}, + {"SystemRootsError.Err", Field, 7, ""}, + {"TooManyConstraints", Const, 10, ""}, + {"TooManyIntermediates", Const, 0, ""}, + {"UnconstrainedName", Const, 10, ""}, + {"UnhandledCriticalExtension", Type, 0, ""}, + {"UnknownAuthorityError", Type, 0, ""}, + {"UnknownAuthorityError.Cert", Field, 8, ""}, + {"UnknownPublicKeyAlgorithm", Const, 0, ""}, + {"UnknownSignatureAlgorithm", Const, 0, ""}, + {"VerifyOptions", Type, 0, ""}, + {"VerifyOptions.CertificatePolicies", Field, 24, ""}, + {"VerifyOptions.CurrentTime", Field, 0, ""}, + {"VerifyOptions.DNSName", Field, 0, ""}, + {"VerifyOptions.Intermediates", Field, 0, ""}, + {"VerifyOptions.KeyUsages", Field, 1, ""}, + {"VerifyOptions.MaxConstraintComparisions", Field, 10, ""}, + {"VerifyOptions.Roots", Field, 0, ""}, + }, + "crypto/x509/pkix": { + {"(*CertificateList).HasExpired", Method, 0, ""}, + {"(*Name).FillFromRDNSequence", Method, 0, ""}, + {"(Name).String", Method, 10, ""}, + {"(Name).ToRDNSequence", Method, 0, ""}, + {"(RDNSequence).String", Method, 10, ""}, + {"AlgorithmIdentifier", Type, 0, ""}, + {"AlgorithmIdentifier.Algorithm", Field, 0, ""}, + {"AlgorithmIdentifier.Parameters", Field, 0, ""}, + {"AttributeTypeAndValue", Type, 0, ""}, + {"AttributeTypeAndValue.Type", Field, 0, ""}, + {"AttributeTypeAndValue.Value", Field, 0, ""}, + {"AttributeTypeAndValueSET", Type, 3, ""}, + {"AttributeTypeAndValueSET.Type", Field, 3, ""}, + {"AttributeTypeAndValueSET.Value", Field, 3, ""}, + {"CertificateList", Type, 0, ""}, + {"CertificateList.SignatureAlgorithm", Field, 0, ""}, + {"CertificateList.SignatureValue", Field, 0, ""}, + {"CertificateList.TBSCertList", Field, 0, ""}, + {"Extension", Type, 0, ""}, + {"Extension.Critical", Field, 0, ""}, + {"Extension.Id", Field, 0, ""}, + {"Extension.Value", Field, 0, ""}, + {"Name", Type, 0, ""}, + {"Name.CommonName", Field, 0, ""}, + {"Name.Country", Field, 0, ""}, + {"Name.ExtraNames", Field, 5, ""}, + {"Name.Locality", Field, 0, ""}, + {"Name.Names", Field, 0, ""}, + {"Name.Organization", Field, 0, ""}, + {"Name.OrganizationalUnit", Field, 0, ""}, + {"Name.PostalCode", Field, 0, ""}, + {"Name.Province", Field, 0, ""}, + {"Name.SerialNumber", Field, 0, ""}, + {"Name.StreetAddress", Field, 0, ""}, + {"RDNSequence", Type, 0, ""}, + {"RelativeDistinguishedNameSET", Type, 0, ""}, + 
{"RevokedCertificate", Type, 0, ""}, + {"RevokedCertificate.Extensions", Field, 0, ""}, + {"RevokedCertificate.RevocationTime", Field, 0, ""}, + {"RevokedCertificate.SerialNumber", Field, 0, ""}, + {"TBSCertificateList", Type, 0, ""}, + {"TBSCertificateList.Extensions", Field, 0, ""}, + {"TBSCertificateList.Issuer", Field, 0, ""}, + {"TBSCertificateList.NextUpdate", Field, 0, ""}, + {"TBSCertificateList.Raw", Field, 0, ""}, + {"TBSCertificateList.RevokedCertificates", Field, 0, ""}, + {"TBSCertificateList.Signature", Field, 0, ""}, + {"TBSCertificateList.ThisUpdate", Field, 0, ""}, + {"TBSCertificateList.Version", Field, 0, ""}, + }, + "database/sql": { + {"(*ColumnType).DatabaseTypeName", Method, 8, ""}, + {"(*ColumnType).DecimalSize", Method, 8, ""}, + {"(*ColumnType).Length", Method, 8, ""}, + {"(*ColumnType).Name", Method, 8, ""}, + {"(*ColumnType).Nullable", Method, 8, ""}, + {"(*ColumnType).ScanType", Method, 8, ""}, + {"(*Conn).BeginTx", Method, 9, ""}, + {"(*Conn).Close", Method, 9, ""}, + {"(*Conn).ExecContext", Method, 9, ""}, + {"(*Conn).PingContext", Method, 9, ""}, + {"(*Conn).PrepareContext", Method, 9, ""}, + {"(*Conn).QueryContext", Method, 9, ""}, + {"(*Conn).QueryRowContext", Method, 9, ""}, + {"(*Conn).Raw", Method, 13, ""}, + {"(*DB).Begin", Method, 0, ""}, + {"(*DB).BeginTx", Method, 8, ""}, + {"(*DB).Close", Method, 0, ""}, + {"(*DB).Conn", Method, 9, ""}, + {"(*DB).Driver", Method, 0, ""}, + {"(*DB).Exec", Method, 0, ""}, + {"(*DB).ExecContext", Method, 8, ""}, + {"(*DB).Ping", Method, 1, ""}, + {"(*DB).PingContext", Method, 8, ""}, + {"(*DB).Prepare", Method, 0, ""}, + {"(*DB).PrepareContext", Method, 8, ""}, + {"(*DB).Query", Method, 0, ""}, + {"(*DB).QueryContext", Method, 8, ""}, + {"(*DB).QueryRow", Method, 0, ""}, + {"(*DB).QueryRowContext", Method, 8, ""}, + {"(*DB).SetConnMaxIdleTime", Method, 15, ""}, + {"(*DB).SetConnMaxLifetime", Method, 6, ""}, + {"(*DB).SetMaxIdleConns", Method, 1, ""}, + {"(*DB).SetMaxOpenConns", Method, 2, ""}, + {"(*DB).Stats", Method, 5, ""}, + {"(*Null).Scan", Method, 22, ""}, + {"(*NullBool).Scan", Method, 0, ""}, + {"(*NullByte).Scan", Method, 17, ""}, + {"(*NullFloat64).Scan", Method, 0, ""}, + {"(*NullInt16).Scan", Method, 17, ""}, + {"(*NullInt32).Scan", Method, 13, ""}, + {"(*NullInt64).Scan", Method, 0, ""}, + {"(*NullString).Scan", Method, 0, ""}, + {"(*NullTime).Scan", Method, 13, ""}, + {"(*Row).Err", Method, 15, ""}, + {"(*Row).Scan", Method, 0, ""}, + {"(*Rows).Close", Method, 0, ""}, + {"(*Rows).ColumnTypes", Method, 8, ""}, + {"(*Rows).Columns", Method, 0, ""}, + {"(*Rows).Err", Method, 0, ""}, + {"(*Rows).Next", Method, 0, ""}, + {"(*Rows).NextResultSet", Method, 8, ""}, + {"(*Rows).Scan", Method, 0, ""}, + {"(*Stmt).Close", Method, 0, ""}, + {"(*Stmt).Exec", Method, 0, ""}, + {"(*Stmt).ExecContext", Method, 8, ""}, + {"(*Stmt).Query", Method, 0, ""}, + {"(*Stmt).QueryContext", Method, 8, ""}, + {"(*Stmt).QueryRow", Method, 0, ""}, + {"(*Stmt).QueryRowContext", Method, 8, ""}, + {"(*Tx).Commit", Method, 0, ""}, + {"(*Tx).Exec", Method, 0, ""}, + {"(*Tx).ExecContext", Method, 8, ""}, + {"(*Tx).Prepare", Method, 0, ""}, + {"(*Tx).PrepareContext", Method, 8, ""}, + {"(*Tx).Query", Method, 0, ""}, + {"(*Tx).QueryContext", Method, 8, ""}, + {"(*Tx).QueryRow", Method, 0, ""}, + {"(*Tx).QueryRowContext", Method, 8, ""}, + {"(*Tx).Rollback", Method, 0, ""}, + {"(*Tx).Stmt", Method, 0, ""}, + {"(*Tx).StmtContext", Method, 8, ""}, + {"(IsolationLevel).String", Method, 11, ""}, + {"(Null).Value", Method, 22, ""}, + 
{"(NullBool).Value", Method, 0, ""}, + {"(NullByte).Value", Method, 17, ""}, + {"(NullFloat64).Value", Method, 0, ""}, + {"(NullInt16).Value", Method, 17, ""}, + {"(NullInt32).Value", Method, 13, ""}, + {"(NullInt64).Value", Method, 0, ""}, + {"(NullString).Value", Method, 0, ""}, + {"(NullTime).Value", Method, 13, ""}, + {"ColumnType", Type, 8, ""}, + {"Conn", Type, 9, ""}, + {"DB", Type, 0, ""}, + {"DBStats", Type, 5, ""}, + {"DBStats.Idle", Field, 11, ""}, + {"DBStats.InUse", Field, 11, ""}, + {"DBStats.MaxIdleClosed", Field, 11, ""}, + {"DBStats.MaxIdleTimeClosed", Field, 15, ""}, + {"DBStats.MaxLifetimeClosed", Field, 11, ""}, + {"DBStats.MaxOpenConnections", Field, 11, ""}, + {"DBStats.OpenConnections", Field, 5, ""}, + {"DBStats.WaitCount", Field, 11, ""}, + {"DBStats.WaitDuration", Field, 11, ""}, + {"Drivers", Func, 4, "func() []string"}, + {"ErrConnDone", Var, 9, ""}, + {"ErrNoRows", Var, 0, ""}, + {"ErrTxDone", Var, 0, ""}, + {"IsolationLevel", Type, 8, ""}, + {"LevelDefault", Const, 8, ""}, + {"LevelLinearizable", Const, 8, ""}, + {"LevelReadCommitted", Const, 8, ""}, + {"LevelReadUncommitted", Const, 8, ""}, + {"LevelRepeatableRead", Const, 8, ""}, + {"LevelSerializable", Const, 8, ""}, + {"LevelSnapshot", Const, 8, ""}, + {"LevelWriteCommitted", Const, 8, ""}, + {"Named", Func, 8, "func(name string, value any) NamedArg"}, + {"NamedArg", Type, 8, ""}, + {"NamedArg.Name", Field, 8, ""}, + {"NamedArg.Value", Field, 8, ""}, + {"Null", Type, 22, ""}, + {"Null.V", Field, 22, ""}, + {"Null.Valid", Field, 22, ""}, + {"NullBool", Type, 0, ""}, + {"NullBool.Bool", Field, 0, ""}, + {"NullBool.Valid", Field, 0, ""}, + {"NullByte", Type, 17, ""}, + {"NullByte.Byte", Field, 17, ""}, + {"NullByte.Valid", Field, 17, ""}, + {"NullFloat64", Type, 0, ""}, + {"NullFloat64.Float64", Field, 0, ""}, + {"NullFloat64.Valid", Field, 0, ""}, + {"NullInt16", Type, 17, ""}, + {"NullInt16.Int16", Field, 17, ""}, + {"NullInt16.Valid", Field, 17, ""}, + {"NullInt32", Type, 13, ""}, + {"NullInt32.Int32", Field, 13, ""}, + {"NullInt32.Valid", Field, 13, ""}, + {"NullInt64", Type, 0, ""}, + {"NullInt64.Int64", Field, 0, ""}, + {"NullInt64.Valid", Field, 0, ""}, + {"NullString", Type, 0, ""}, + {"NullString.String", Field, 0, ""}, + {"NullString.Valid", Field, 0, ""}, + {"NullTime", Type, 13, ""}, + {"NullTime.Time", Field, 13, ""}, + {"NullTime.Valid", Field, 13, ""}, + {"Open", Func, 0, "func(driverName string, dataSourceName string) (*DB, error)"}, + {"OpenDB", Func, 10, "func(c driver.Connector) *DB"}, + {"Out", Type, 9, ""}, + {"Out.Dest", Field, 9, ""}, + {"Out.In", Field, 9, ""}, + {"RawBytes", Type, 0, ""}, + {"Register", Func, 0, "func(name string, driver driver.Driver)"}, + {"Result", Type, 0, ""}, + {"Row", Type, 0, ""}, + {"Rows", Type, 0, ""}, + {"Scanner", Type, 0, ""}, + {"Stmt", Type, 0, ""}, + {"Tx", Type, 0, ""}, + {"TxOptions", Type, 8, ""}, + {"TxOptions.Isolation", Field, 8, ""}, + {"TxOptions.ReadOnly", Field, 8, ""}, + }, + "database/sql/driver": { + {"(NotNull).ConvertValue", Method, 0, ""}, + {"(Null).ConvertValue", Method, 0, ""}, + {"(RowsAffected).LastInsertId", Method, 0, ""}, + {"(RowsAffected).RowsAffected", Method, 0, ""}, + {"Bool", Var, 0, ""}, + {"ColumnConverter", Type, 0, ""}, + {"Conn", Type, 0, ""}, + {"ConnBeginTx", Type, 8, ""}, + {"ConnPrepareContext", Type, 8, ""}, + {"Connector", Type, 10, ""}, + {"DefaultParameterConverter", Var, 0, ""}, + {"Driver", Type, 0, ""}, + {"DriverContext", Type, 10, ""}, + {"ErrBadConn", Var, 0, ""}, + {"ErrRemoveArgument", Var, 9, ""}, + 
{"ErrSkip", Var, 0, ""}, + {"Execer", Type, 0, ""}, + {"ExecerContext", Type, 8, ""}, + {"Int32", Var, 0, ""}, + {"IsScanValue", Func, 0, "func(v any) bool"}, + {"IsValue", Func, 0, "func(v any) bool"}, + {"IsolationLevel", Type, 8, ""}, + {"NamedValue", Type, 8, ""}, + {"NamedValue.Name", Field, 8, ""}, + {"NamedValue.Ordinal", Field, 8, ""}, + {"NamedValue.Value", Field, 8, ""}, + {"NamedValueChecker", Type, 9, ""}, + {"NotNull", Type, 0, ""}, + {"NotNull.Converter", Field, 0, ""}, + {"Null", Type, 0, ""}, + {"Null.Converter", Field, 0, ""}, + {"Pinger", Type, 8, ""}, + {"Queryer", Type, 1, ""}, + {"QueryerContext", Type, 8, ""}, + {"Result", Type, 0, ""}, + {"ResultNoRows", Var, 0, ""}, + {"Rows", Type, 0, ""}, + {"RowsAffected", Type, 0, ""}, + {"RowsColumnTypeDatabaseTypeName", Type, 8, ""}, + {"RowsColumnTypeLength", Type, 8, ""}, + {"RowsColumnTypeNullable", Type, 8, ""}, + {"RowsColumnTypePrecisionScale", Type, 8, ""}, + {"RowsColumnTypeScanType", Type, 8, ""}, + {"RowsNextResultSet", Type, 8, ""}, + {"SessionResetter", Type, 10, ""}, + {"Stmt", Type, 0, ""}, + {"StmtExecContext", Type, 8, ""}, + {"StmtQueryContext", Type, 8, ""}, + {"String", Var, 0, ""}, + {"Tx", Type, 0, ""}, + {"TxOptions", Type, 8, ""}, + {"TxOptions.Isolation", Field, 8, ""}, + {"TxOptions.ReadOnly", Field, 8, ""}, + {"Validator", Type, 15, ""}, + {"Value", Type, 0, ""}, + {"ValueConverter", Type, 0, ""}, + {"Valuer", Type, 0, ""}, + }, + "debug/buildinfo": { + {"BuildInfo", Type, 18, ""}, + {"Read", Func, 18, "func(r io.ReaderAt) (*BuildInfo, error)"}, + {"ReadFile", Func, 18, "func(name string) (info *BuildInfo, err error)"}, + }, + "debug/dwarf": { + {"(*AddrType).Basic", Method, 0, ""}, + {"(*AddrType).Common", Method, 0, ""}, + {"(*AddrType).Size", Method, 0, ""}, + {"(*AddrType).String", Method, 0, ""}, + {"(*ArrayType).Common", Method, 0, ""}, + {"(*ArrayType).Size", Method, 0, ""}, + {"(*ArrayType).String", Method, 0, ""}, + {"(*BasicType).Basic", Method, 0, ""}, + {"(*BasicType).Common", Method, 0, ""}, + {"(*BasicType).Size", Method, 0, ""}, + {"(*BasicType).String", Method, 0, ""}, + {"(*BoolType).Basic", Method, 0, ""}, + {"(*BoolType).Common", Method, 0, ""}, + {"(*BoolType).Size", Method, 0, ""}, + {"(*BoolType).String", Method, 0, ""}, + {"(*CharType).Basic", Method, 0, ""}, + {"(*CharType).Common", Method, 0, ""}, + {"(*CharType).Size", Method, 0, ""}, + {"(*CharType).String", Method, 0, ""}, + {"(*CommonType).Common", Method, 0, ""}, + {"(*CommonType).Size", Method, 0, ""}, + {"(*ComplexType).Basic", Method, 0, ""}, + {"(*ComplexType).Common", Method, 0, ""}, + {"(*ComplexType).Size", Method, 0, ""}, + {"(*ComplexType).String", Method, 0, ""}, + {"(*Data).AddSection", Method, 14, ""}, + {"(*Data).AddTypes", Method, 3, ""}, + {"(*Data).LineReader", Method, 5, ""}, + {"(*Data).Ranges", Method, 7, ""}, + {"(*Data).Reader", Method, 0, ""}, + {"(*Data).Type", Method, 0, ""}, + {"(*DotDotDotType).Common", Method, 0, ""}, + {"(*DotDotDotType).Size", Method, 0, ""}, + {"(*DotDotDotType).String", Method, 0, ""}, + {"(*Entry).AttrField", Method, 5, ""}, + {"(*Entry).Val", Method, 0, ""}, + {"(*EnumType).Common", Method, 0, ""}, + {"(*EnumType).Size", Method, 0, ""}, + {"(*EnumType).String", Method, 0, ""}, + {"(*FloatType).Basic", Method, 0, ""}, + {"(*FloatType).Common", Method, 0, ""}, + {"(*FloatType).Size", Method, 0, ""}, + {"(*FloatType).String", Method, 0, ""}, + {"(*FuncType).Common", Method, 0, ""}, + {"(*FuncType).Size", Method, 0, ""}, + {"(*FuncType).String", Method, 0, ""}, + 
{"(*IntType).Basic", Method, 0, ""}, + {"(*IntType).Common", Method, 0, ""}, + {"(*IntType).Size", Method, 0, ""}, + {"(*IntType).String", Method, 0, ""}, + {"(*LineReader).Files", Method, 14, ""}, + {"(*LineReader).Next", Method, 5, ""}, + {"(*LineReader).Reset", Method, 5, ""}, + {"(*LineReader).Seek", Method, 5, ""}, + {"(*LineReader).SeekPC", Method, 5, ""}, + {"(*LineReader).Tell", Method, 5, ""}, + {"(*PtrType).Common", Method, 0, ""}, + {"(*PtrType).Size", Method, 0, ""}, + {"(*PtrType).String", Method, 0, ""}, + {"(*QualType).Common", Method, 0, ""}, + {"(*QualType).Size", Method, 0, ""}, + {"(*QualType).String", Method, 0, ""}, + {"(*Reader).AddressSize", Method, 5, ""}, + {"(*Reader).ByteOrder", Method, 14, ""}, + {"(*Reader).Next", Method, 0, ""}, + {"(*Reader).Seek", Method, 0, ""}, + {"(*Reader).SeekPC", Method, 7, ""}, + {"(*Reader).SkipChildren", Method, 0, ""}, + {"(*StructType).Common", Method, 0, ""}, + {"(*StructType).Defn", Method, 0, ""}, + {"(*StructType).Size", Method, 0, ""}, + {"(*StructType).String", Method, 0, ""}, + {"(*TypedefType).Common", Method, 0, ""}, + {"(*TypedefType).Size", Method, 0, ""}, + {"(*TypedefType).String", Method, 0, ""}, + {"(*UcharType).Basic", Method, 0, ""}, + {"(*UcharType).Common", Method, 0, ""}, + {"(*UcharType).Size", Method, 0, ""}, + {"(*UcharType).String", Method, 0, ""}, + {"(*UintType).Basic", Method, 0, ""}, + {"(*UintType).Common", Method, 0, ""}, + {"(*UintType).Size", Method, 0, ""}, + {"(*UintType).String", Method, 0, ""}, + {"(*UnspecifiedType).Basic", Method, 4, ""}, + {"(*UnspecifiedType).Common", Method, 4, ""}, + {"(*UnspecifiedType).Size", Method, 4, ""}, + {"(*UnspecifiedType).String", Method, 4, ""}, + {"(*UnsupportedType).Common", Method, 13, ""}, + {"(*UnsupportedType).Size", Method, 13, ""}, + {"(*UnsupportedType).String", Method, 13, ""}, + {"(*VoidType).Common", Method, 0, ""}, + {"(*VoidType).Size", Method, 0, ""}, + {"(*VoidType).String", Method, 0, ""}, + {"(Attr).GoString", Method, 0, ""}, + {"(Attr).String", Method, 0, ""}, + {"(Class).GoString", Method, 5, ""}, + {"(Class).String", Method, 5, ""}, + {"(DecodeError).Error", Method, 0, ""}, + {"(Tag).GoString", Method, 0, ""}, + {"(Tag).String", Method, 0, ""}, + {"AddrType", Type, 0, ""}, + {"AddrType.BasicType", Field, 0, ""}, + {"ArrayType", Type, 0, ""}, + {"ArrayType.CommonType", Field, 0, ""}, + {"ArrayType.Count", Field, 0, ""}, + {"ArrayType.StrideBitSize", Field, 0, ""}, + {"ArrayType.Type", Field, 0, ""}, + {"Attr", Type, 0, ""}, + {"AttrAbstractOrigin", Const, 0, ""}, + {"AttrAccessibility", Const, 0, ""}, + {"AttrAddrBase", Const, 14, ""}, + {"AttrAddrClass", Const, 0, ""}, + {"AttrAlignment", Const, 14, ""}, + {"AttrAllocated", Const, 0, ""}, + {"AttrArtificial", Const, 0, ""}, + {"AttrAssociated", Const, 0, ""}, + {"AttrBaseTypes", Const, 0, ""}, + {"AttrBinaryScale", Const, 14, ""}, + {"AttrBitOffset", Const, 0, ""}, + {"AttrBitSize", Const, 0, ""}, + {"AttrByteSize", Const, 0, ""}, + {"AttrCallAllCalls", Const, 14, ""}, + {"AttrCallAllSourceCalls", Const, 14, ""}, + {"AttrCallAllTailCalls", Const, 14, ""}, + {"AttrCallColumn", Const, 0, ""}, + {"AttrCallDataLocation", Const, 14, ""}, + {"AttrCallDataValue", Const, 14, ""}, + {"AttrCallFile", Const, 0, ""}, + {"AttrCallLine", Const, 0, ""}, + {"AttrCallOrigin", Const, 14, ""}, + {"AttrCallPC", Const, 14, ""}, + {"AttrCallParameter", Const, 14, ""}, + {"AttrCallReturnPC", Const, 14, ""}, + {"AttrCallTailCall", Const, 14, ""}, + {"AttrCallTarget", Const, 14, ""}, + {"AttrCallTargetClobbered", 
Const, 14, ""}, + {"AttrCallValue", Const, 14, ""}, + {"AttrCalling", Const, 0, ""}, + {"AttrCommonRef", Const, 0, ""}, + {"AttrCompDir", Const, 0, ""}, + {"AttrConstExpr", Const, 14, ""}, + {"AttrConstValue", Const, 0, ""}, + {"AttrContainingType", Const, 0, ""}, + {"AttrCount", Const, 0, ""}, + {"AttrDataBitOffset", Const, 14, ""}, + {"AttrDataLocation", Const, 0, ""}, + {"AttrDataMemberLoc", Const, 0, ""}, + {"AttrDecimalScale", Const, 14, ""}, + {"AttrDecimalSign", Const, 14, ""}, + {"AttrDeclColumn", Const, 0, ""}, + {"AttrDeclFile", Const, 0, ""}, + {"AttrDeclLine", Const, 0, ""}, + {"AttrDeclaration", Const, 0, ""}, + {"AttrDefaultValue", Const, 0, ""}, + {"AttrDefaulted", Const, 14, ""}, + {"AttrDeleted", Const, 14, ""}, + {"AttrDescription", Const, 0, ""}, + {"AttrDigitCount", Const, 14, ""}, + {"AttrDiscr", Const, 0, ""}, + {"AttrDiscrList", Const, 0, ""}, + {"AttrDiscrValue", Const, 0, ""}, + {"AttrDwoName", Const, 14, ""}, + {"AttrElemental", Const, 14, ""}, + {"AttrEncoding", Const, 0, ""}, + {"AttrEndianity", Const, 14, ""}, + {"AttrEntrypc", Const, 0, ""}, + {"AttrEnumClass", Const, 14, ""}, + {"AttrExplicit", Const, 14, ""}, + {"AttrExportSymbols", Const, 14, ""}, + {"AttrExtension", Const, 0, ""}, + {"AttrExternal", Const, 0, ""}, + {"AttrFrameBase", Const, 0, ""}, + {"AttrFriend", Const, 0, ""}, + {"AttrHighpc", Const, 0, ""}, + {"AttrIdentifierCase", Const, 0, ""}, + {"AttrImport", Const, 0, ""}, + {"AttrInline", Const, 0, ""}, + {"AttrIsOptional", Const, 0, ""}, + {"AttrLanguage", Const, 0, ""}, + {"AttrLinkageName", Const, 14, ""}, + {"AttrLocation", Const, 0, ""}, + {"AttrLoclistsBase", Const, 14, ""}, + {"AttrLowerBound", Const, 0, ""}, + {"AttrLowpc", Const, 0, ""}, + {"AttrMacroInfo", Const, 0, ""}, + {"AttrMacros", Const, 14, ""}, + {"AttrMainSubprogram", Const, 14, ""}, + {"AttrMutable", Const, 14, ""}, + {"AttrName", Const, 0, ""}, + {"AttrNamelistItem", Const, 0, ""}, + {"AttrNoreturn", Const, 14, ""}, + {"AttrObjectPointer", Const, 14, ""}, + {"AttrOrdering", Const, 0, ""}, + {"AttrPictureString", Const, 14, ""}, + {"AttrPriority", Const, 0, ""}, + {"AttrProducer", Const, 0, ""}, + {"AttrPrototyped", Const, 0, ""}, + {"AttrPure", Const, 14, ""}, + {"AttrRanges", Const, 0, ""}, + {"AttrRank", Const, 14, ""}, + {"AttrRecursive", Const, 14, ""}, + {"AttrReference", Const, 14, ""}, + {"AttrReturnAddr", Const, 0, ""}, + {"AttrRnglistsBase", Const, 14, ""}, + {"AttrRvalueReference", Const, 14, ""}, + {"AttrSegment", Const, 0, ""}, + {"AttrSibling", Const, 0, ""}, + {"AttrSignature", Const, 14, ""}, + {"AttrSmall", Const, 14, ""}, + {"AttrSpecification", Const, 0, ""}, + {"AttrStartScope", Const, 0, ""}, + {"AttrStaticLink", Const, 0, ""}, + {"AttrStmtList", Const, 0, ""}, + {"AttrStrOffsetsBase", Const, 14, ""}, + {"AttrStride", Const, 0, ""}, + {"AttrStrideSize", Const, 0, ""}, + {"AttrStringLength", Const, 0, ""}, + {"AttrStringLengthBitSize", Const, 14, ""}, + {"AttrStringLengthByteSize", Const, 14, ""}, + {"AttrThreadsScaled", Const, 14, ""}, + {"AttrTrampoline", Const, 0, ""}, + {"AttrType", Const, 0, ""}, + {"AttrUpperBound", Const, 0, ""}, + {"AttrUseLocation", Const, 0, ""}, + {"AttrUseUTF8", Const, 0, ""}, + {"AttrVarParam", Const, 0, ""}, + {"AttrVirtuality", Const, 0, ""}, + {"AttrVisibility", Const, 0, ""}, + {"AttrVtableElemLoc", Const, 0, ""}, + {"BasicType", Type, 0, ""}, + {"BasicType.BitOffset", Field, 0, ""}, + {"BasicType.BitSize", Field, 0, ""}, + {"BasicType.CommonType", Field, 0, ""}, + {"BasicType.DataBitOffset", Field, 18, ""}, + {"BoolType", 
Type, 0, ""}, + {"BoolType.BasicType", Field, 0, ""}, + {"CharType", Type, 0, ""}, + {"CharType.BasicType", Field, 0, ""}, + {"Class", Type, 5, ""}, + {"ClassAddrPtr", Const, 14, ""}, + {"ClassAddress", Const, 5, ""}, + {"ClassBlock", Const, 5, ""}, + {"ClassConstant", Const, 5, ""}, + {"ClassExprLoc", Const, 5, ""}, + {"ClassFlag", Const, 5, ""}, + {"ClassLinePtr", Const, 5, ""}, + {"ClassLocList", Const, 14, ""}, + {"ClassLocListPtr", Const, 5, ""}, + {"ClassMacPtr", Const, 5, ""}, + {"ClassRangeListPtr", Const, 5, ""}, + {"ClassReference", Const, 5, ""}, + {"ClassReferenceAlt", Const, 5, ""}, + {"ClassReferenceSig", Const, 5, ""}, + {"ClassRngList", Const, 14, ""}, + {"ClassRngListsPtr", Const, 14, ""}, + {"ClassStrOffsetsPtr", Const, 14, ""}, + {"ClassString", Const, 5, ""}, + {"ClassStringAlt", Const, 5, ""}, + {"ClassUnknown", Const, 6, ""}, + {"CommonType", Type, 0, ""}, + {"CommonType.ByteSize", Field, 0, ""}, + {"CommonType.Name", Field, 0, ""}, + {"ComplexType", Type, 0, ""}, + {"ComplexType.BasicType", Field, 0, ""}, + {"Data", Type, 0, ""}, + {"DecodeError", Type, 0, ""}, + {"DecodeError.Err", Field, 0, ""}, + {"DecodeError.Name", Field, 0, ""}, + {"DecodeError.Offset", Field, 0, ""}, + {"DotDotDotType", Type, 0, ""}, + {"DotDotDotType.CommonType", Field, 0, ""}, + {"Entry", Type, 0, ""}, + {"Entry.Children", Field, 0, ""}, + {"Entry.Field", Field, 0, ""}, + {"Entry.Offset", Field, 0, ""}, + {"Entry.Tag", Field, 0, ""}, + {"EnumType", Type, 0, ""}, + {"EnumType.CommonType", Field, 0, ""}, + {"EnumType.EnumName", Field, 0, ""}, + {"EnumType.Val", Field, 0, ""}, + {"EnumValue", Type, 0, ""}, + {"EnumValue.Name", Field, 0, ""}, + {"EnumValue.Val", Field, 0, ""}, + {"ErrUnknownPC", Var, 5, ""}, + {"Field", Type, 0, ""}, + {"Field.Attr", Field, 0, ""}, + {"Field.Class", Field, 5, ""}, + {"Field.Val", Field, 0, ""}, + {"FloatType", Type, 0, ""}, + {"FloatType.BasicType", Field, 0, ""}, + {"FuncType", Type, 0, ""}, + {"FuncType.CommonType", Field, 0, ""}, + {"FuncType.ParamType", Field, 0, ""}, + {"FuncType.ReturnType", Field, 0, ""}, + {"IntType", Type, 0, ""}, + {"IntType.BasicType", Field, 0, ""}, + {"LineEntry", Type, 5, ""}, + {"LineEntry.Address", Field, 5, ""}, + {"LineEntry.BasicBlock", Field, 5, ""}, + {"LineEntry.Column", Field, 5, ""}, + {"LineEntry.Discriminator", Field, 5, ""}, + {"LineEntry.EndSequence", Field, 5, ""}, + {"LineEntry.EpilogueBegin", Field, 5, ""}, + {"LineEntry.File", Field, 5, ""}, + {"LineEntry.ISA", Field, 5, ""}, + {"LineEntry.IsStmt", Field, 5, ""}, + {"LineEntry.Line", Field, 5, ""}, + {"LineEntry.OpIndex", Field, 5, ""}, + {"LineEntry.PrologueEnd", Field, 5, ""}, + {"LineFile", Type, 5, ""}, + {"LineFile.Length", Field, 5, ""}, + {"LineFile.Mtime", Field, 5, ""}, + {"LineFile.Name", Field, 5, ""}, + {"LineReader", Type, 5, ""}, + {"LineReaderPos", Type, 5, ""}, + {"New", Func, 0, "func(abbrev []byte, aranges []byte, frame []byte, info []byte, line []byte, pubnames []byte, ranges []byte, str []byte) (*Data, error)"}, + {"Offset", Type, 0, ""}, + {"PtrType", Type, 0, ""}, + {"PtrType.CommonType", Field, 0, ""}, + {"PtrType.Type", Field, 0, ""}, + {"QualType", Type, 0, ""}, + {"QualType.CommonType", Field, 0, ""}, + {"QualType.Qual", Field, 0, ""}, + {"QualType.Type", Field, 0, ""}, + {"Reader", Type, 0, ""}, + {"StructField", Type, 0, ""}, + {"StructField.BitOffset", Field, 0, ""}, + {"StructField.BitSize", Field, 0, ""}, + {"StructField.ByteOffset", Field, 0, ""}, + {"StructField.ByteSize", Field, 0, ""}, + {"StructField.DataBitOffset", Field, 18, 
""}, + {"StructField.Name", Field, 0, ""}, + {"StructField.Type", Field, 0, ""}, + {"StructType", Type, 0, ""}, + {"StructType.CommonType", Field, 0, ""}, + {"StructType.Field", Field, 0, ""}, + {"StructType.Incomplete", Field, 0, ""}, + {"StructType.Kind", Field, 0, ""}, + {"StructType.StructName", Field, 0, ""}, + {"Tag", Type, 0, ""}, + {"TagAccessDeclaration", Const, 0, ""}, + {"TagArrayType", Const, 0, ""}, + {"TagAtomicType", Const, 14, ""}, + {"TagBaseType", Const, 0, ""}, + {"TagCallSite", Const, 14, ""}, + {"TagCallSiteParameter", Const, 14, ""}, + {"TagCatchDwarfBlock", Const, 0, ""}, + {"TagClassType", Const, 0, ""}, + {"TagCoarrayType", Const, 14, ""}, + {"TagCommonDwarfBlock", Const, 0, ""}, + {"TagCommonInclusion", Const, 0, ""}, + {"TagCompileUnit", Const, 0, ""}, + {"TagCondition", Const, 3, ""}, + {"TagConstType", Const, 0, ""}, + {"TagConstant", Const, 0, ""}, + {"TagDwarfProcedure", Const, 0, ""}, + {"TagDynamicType", Const, 14, ""}, + {"TagEntryPoint", Const, 0, ""}, + {"TagEnumerationType", Const, 0, ""}, + {"TagEnumerator", Const, 0, ""}, + {"TagFileType", Const, 0, ""}, + {"TagFormalParameter", Const, 0, ""}, + {"TagFriend", Const, 0, ""}, + {"TagGenericSubrange", Const, 14, ""}, + {"TagImmutableType", Const, 14, ""}, + {"TagImportedDeclaration", Const, 0, ""}, + {"TagImportedModule", Const, 0, ""}, + {"TagImportedUnit", Const, 0, ""}, + {"TagInheritance", Const, 0, ""}, + {"TagInlinedSubroutine", Const, 0, ""}, + {"TagInterfaceType", Const, 0, ""}, + {"TagLabel", Const, 0, ""}, + {"TagLexDwarfBlock", Const, 0, ""}, + {"TagMember", Const, 0, ""}, + {"TagModule", Const, 0, ""}, + {"TagMutableType", Const, 0, ""}, + {"TagNamelist", Const, 0, ""}, + {"TagNamelistItem", Const, 0, ""}, + {"TagNamespace", Const, 0, ""}, + {"TagPackedType", Const, 0, ""}, + {"TagPartialUnit", Const, 0, ""}, + {"TagPointerType", Const, 0, ""}, + {"TagPtrToMemberType", Const, 0, ""}, + {"TagReferenceType", Const, 0, ""}, + {"TagRestrictType", Const, 0, ""}, + {"TagRvalueReferenceType", Const, 3, ""}, + {"TagSetType", Const, 0, ""}, + {"TagSharedType", Const, 3, ""}, + {"TagSkeletonUnit", Const, 14, ""}, + {"TagStringType", Const, 0, ""}, + {"TagStructType", Const, 0, ""}, + {"TagSubprogram", Const, 0, ""}, + {"TagSubrangeType", Const, 0, ""}, + {"TagSubroutineType", Const, 0, ""}, + {"TagTemplateAlias", Const, 3, ""}, + {"TagTemplateTypeParameter", Const, 0, ""}, + {"TagTemplateValueParameter", Const, 0, ""}, + {"TagThrownType", Const, 0, ""}, + {"TagTryDwarfBlock", Const, 0, ""}, + {"TagTypeUnit", Const, 3, ""}, + {"TagTypedef", Const, 0, ""}, + {"TagUnionType", Const, 0, ""}, + {"TagUnspecifiedParameters", Const, 0, ""}, + {"TagUnspecifiedType", Const, 0, ""}, + {"TagVariable", Const, 0, ""}, + {"TagVariant", Const, 0, ""}, + {"TagVariantPart", Const, 0, ""}, + {"TagVolatileType", Const, 0, ""}, + {"TagWithStmt", Const, 0, ""}, + {"Type", Type, 0, ""}, + {"TypedefType", Type, 0, ""}, + {"TypedefType.CommonType", Field, 0, ""}, + {"TypedefType.Type", Field, 0, ""}, + {"UcharType", Type, 0, ""}, + {"UcharType.BasicType", Field, 0, ""}, + {"UintType", Type, 0, ""}, + {"UintType.BasicType", Field, 0, ""}, + {"UnspecifiedType", Type, 4, ""}, + {"UnspecifiedType.BasicType", Field, 4, ""}, + {"UnsupportedType", Type, 13, ""}, + {"UnsupportedType.CommonType", Field, 13, ""}, + {"UnsupportedType.Tag", Field, 13, ""}, + {"VoidType", Type, 0, ""}, + {"VoidType.CommonType", Field, 0, ""}, + }, + "debug/elf": { + {"(*File).Close", Method, 0, ""}, + {"(*File).DWARF", Method, 0, ""}, + 
{"(*File).DynString", Method, 1, ""}, + {"(*File).DynValue", Method, 21, ""}, + {"(*File).DynamicSymbols", Method, 4, ""}, + {"(*File).DynamicVersionNeeds", Method, 24, ""}, + {"(*File).DynamicVersions", Method, 24, ""}, + {"(*File).ImportedLibraries", Method, 0, ""}, + {"(*File).ImportedSymbols", Method, 0, ""}, + {"(*File).Section", Method, 0, ""}, + {"(*File).SectionByType", Method, 0, ""}, + {"(*File).Symbols", Method, 0, ""}, + {"(*FormatError).Error", Method, 0, ""}, + {"(*Prog).Open", Method, 0, ""}, + {"(*Section).Data", Method, 0, ""}, + {"(*Section).Open", Method, 0, ""}, + {"(Class).GoString", Method, 0, ""}, + {"(Class).String", Method, 0, ""}, + {"(CompressionType).GoString", Method, 6, ""}, + {"(CompressionType).String", Method, 6, ""}, + {"(Data).GoString", Method, 0, ""}, + {"(Data).String", Method, 0, ""}, + {"(DynFlag).GoString", Method, 0, ""}, + {"(DynFlag).String", Method, 0, ""}, + {"(DynFlag1).GoString", Method, 21, ""}, + {"(DynFlag1).String", Method, 21, ""}, + {"(DynTag).GoString", Method, 0, ""}, + {"(DynTag).String", Method, 0, ""}, + {"(Machine).GoString", Method, 0, ""}, + {"(Machine).String", Method, 0, ""}, + {"(NType).GoString", Method, 0, ""}, + {"(NType).String", Method, 0, ""}, + {"(OSABI).GoString", Method, 0, ""}, + {"(OSABI).String", Method, 0, ""}, + {"(Prog).ReadAt", Method, 0, ""}, + {"(ProgFlag).GoString", Method, 0, ""}, + {"(ProgFlag).String", Method, 0, ""}, + {"(ProgType).GoString", Method, 0, ""}, + {"(ProgType).String", Method, 0, ""}, + {"(R_386).GoString", Method, 0, ""}, + {"(R_386).String", Method, 0, ""}, + {"(R_390).GoString", Method, 7, ""}, + {"(R_390).String", Method, 7, ""}, + {"(R_AARCH64).GoString", Method, 4, ""}, + {"(R_AARCH64).String", Method, 4, ""}, + {"(R_ALPHA).GoString", Method, 0, ""}, + {"(R_ALPHA).String", Method, 0, ""}, + {"(R_ARM).GoString", Method, 0, ""}, + {"(R_ARM).String", Method, 0, ""}, + {"(R_LARCH).GoString", Method, 19, ""}, + {"(R_LARCH).String", Method, 19, ""}, + {"(R_MIPS).GoString", Method, 6, ""}, + {"(R_MIPS).String", Method, 6, ""}, + {"(R_PPC).GoString", Method, 0, ""}, + {"(R_PPC).String", Method, 0, ""}, + {"(R_PPC64).GoString", Method, 5, ""}, + {"(R_PPC64).String", Method, 5, ""}, + {"(R_RISCV).GoString", Method, 11, ""}, + {"(R_RISCV).String", Method, 11, ""}, + {"(R_SPARC).GoString", Method, 0, ""}, + {"(R_SPARC).String", Method, 0, ""}, + {"(R_X86_64).GoString", Method, 0, ""}, + {"(R_X86_64).String", Method, 0, ""}, + {"(Section).ReadAt", Method, 0, ""}, + {"(SectionFlag).GoString", Method, 0, ""}, + {"(SectionFlag).String", Method, 0, ""}, + {"(SectionIndex).GoString", Method, 0, ""}, + {"(SectionIndex).String", Method, 0, ""}, + {"(SectionType).GoString", Method, 0, ""}, + {"(SectionType).String", Method, 0, ""}, + {"(SymBind).GoString", Method, 0, ""}, + {"(SymBind).String", Method, 0, ""}, + {"(SymType).GoString", Method, 0, ""}, + {"(SymType).String", Method, 0, ""}, + {"(SymVis).GoString", Method, 0, ""}, + {"(SymVis).String", Method, 0, ""}, + {"(Type).GoString", Method, 0, ""}, + {"(Type).String", Method, 0, ""}, + {"(Version).GoString", Method, 0, ""}, + {"(Version).String", Method, 0, ""}, + {"(VersionIndex).Index", Method, 24, ""}, + {"(VersionIndex).IsHidden", Method, 24, ""}, + {"ARM_MAGIC_TRAMP_NUMBER", Const, 0, ""}, + {"COMPRESS_HIOS", Const, 6, ""}, + {"COMPRESS_HIPROC", Const, 6, ""}, + {"COMPRESS_LOOS", Const, 6, ""}, + {"COMPRESS_LOPROC", Const, 6, ""}, + {"COMPRESS_ZLIB", Const, 6, ""}, + {"COMPRESS_ZSTD", Const, 21, ""}, + {"Chdr32", Type, 6, ""}, + 
{"Chdr32.Addralign", Field, 6, ""}, + {"Chdr32.Size", Field, 6, ""}, + {"Chdr32.Type", Field, 6, ""}, + {"Chdr64", Type, 6, ""}, + {"Chdr64.Addralign", Field, 6, ""}, + {"Chdr64.Size", Field, 6, ""}, + {"Chdr64.Type", Field, 6, ""}, + {"Class", Type, 0, ""}, + {"CompressionType", Type, 6, ""}, + {"DF_1_CONFALT", Const, 21, ""}, + {"DF_1_DIRECT", Const, 21, ""}, + {"DF_1_DISPRELDNE", Const, 21, ""}, + {"DF_1_DISPRELPND", Const, 21, ""}, + {"DF_1_EDITED", Const, 21, ""}, + {"DF_1_ENDFILTEE", Const, 21, ""}, + {"DF_1_GLOBAL", Const, 21, ""}, + {"DF_1_GLOBAUDIT", Const, 21, ""}, + {"DF_1_GROUP", Const, 21, ""}, + {"DF_1_IGNMULDEF", Const, 21, ""}, + {"DF_1_INITFIRST", Const, 21, ""}, + {"DF_1_INTERPOSE", Const, 21, ""}, + {"DF_1_KMOD", Const, 21, ""}, + {"DF_1_LOADFLTR", Const, 21, ""}, + {"DF_1_NOCOMMON", Const, 21, ""}, + {"DF_1_NODEFLIB", Const, 21, ""}, + {"DF_1_NODELETE", Const, 21, ""}, + {"DF_1_NODIRECT", Const, 21, ""}, + {"DF_1_NODUMP", Const, 21, ""}, + {"DF_1_NOHDR", Const, 21, ""}, + {"DF_1_NOKSYMS", Const, 21, ""}, + {"DF_1_NOOPEN", Const, 21, ""}, + {"DF_1_NORELOC", Const, 21, ""}, + {"DF_1_NOW", Const, 21, ""}, + {"DF_1_ORIGIN", Const, 21, ""}, + {"DF_1_PIE", Const, 21, ""}, + {"DF_1_SINGLETON", Const, 21, ""}, + {"DF_1_STUB", Const, 21, ""}, + {"DF_1_SYMINTPOSE", Const, 21, ""}, + {"DF_1_TRANS", Const, 21, ""}, + {"DF_1_WEAKFILTER", Const, 21, ""}, + {"DF_BIND_NOW", Const, 0, ""}, + {"DF_ORIGIN", Const, 0, ""}, + {"DF_STATIC_TLS", Const, 0, ""}, + {"DF_SYMBOLIC", Const, 0, ""}, + {"DF_TEXTREL", Const, 0, ""}, + {"DT_ADDRRNGHI", Const, 16, ""}, + {"DT_ADDRRNGLO", Const, 16, ""}, + {"DT_AUDIT", Const, 16, ""}, + {"DT_AUXILIARY", Const, 16, ""}, + {"DT_BIND_NOW", Const, 0, ""}, + {"DT_CHECKSUM", Const, 16, ""}, + {"DT_CONFIG", Const, 16, ""}, + {"DT_DEBUG", Const, 0, ""}, + {"DT_DEPAUDIT", Const, 16, ""}, + {"DT_ENCODING", Const, 0, ""}, + {"DT_FEATURE", Const, 16, ""}, + {"DT_FILTER", Const, 16, ""}, + {"DT_FINI", Const, 0, ""}, + {"DT_FINI_ARRAY", Const, 0, ""}, + {"DT_FINI_ARRAYSZ", Const, 0, ""}, + {"DT_FLAGS", Const, 0, ""}, + {"DT_FLAGS_1", Const, 16, ""}, + {"DT_GNU_CONFLICT", Const, 16, ""}, + {"DT_GNU_CONFLICTSZ", Const, 16, ""}, + {"DT_GNU_HASH", Const, 16, ""}, + {"DT_GNU_LIBLIST", Const, 16, ""}, + {"DT_GNU_LIBLISTSZ", Const, 16, ""}, + {"DT_GNU_PRELINKED", Const, 16, ""}, + {"DT_HASH", Const, 0, ""}, + {"DT_HIOS", Const, 0, ""}, + {"DT_HIPROC", Const, 0, ""}, + {"DT_INIT", Const, 0, ""}, + {"DT_INIT_ARRAY", Const, 0, ""}, + {"DT_INIT_ARRAYSZ", Const, 0, ""}, + {"DT_JMPREL", Const, 0, ""}, + {"DT_LOOS", Const, 0, ""}, + {"DT_LOPROC", Const, 0, ""}, + {"DT_MIPS_AUX_DYNAMIC", Const, 16, ""}, + {"DT_MIPS_BASE_ADDRESS", Const, 16, ""}, + {"DT_MIPS_COMPACT_SIZE", Const, 16, ""}, + {"DT_MIPS_CONFLICT", Const, 16, ""}, + {"DT_MIPS_CONFLICTNO", Const, 16, ""}, + {"DT_MIPS_CXX_FLAGS", Const, 16, ""}, + {"DT_MIPS_DELTA_CLASS", Const, 16, ""}, + {"DT_MIPS_DELTA_CLASSSYM", Const, 16, ""}, + {"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16, ""}, + {"DT_MIPS_DELTA_CLASS_NO", Const, 16, ""}, + {"DT_MIPS_DELTA_INSTANCE", Const, 16, ""}, + {"DT_MIPS_DELTA_INSTANCE_NO", Const, 16, ""}, + {"DT_MIPS_DELTA_RELOC", Const, 16, ""}, + {"DT_MIPS_DELTA_RELOC_NO", Const, 16, ""}, + {"DT_MIPS_DELTA_SYM", Const, 16, ""}, + {"DT_MIPS_DELTA_SYM_NO", Const, 16, ""}, + {"DT_MIPS_DYNSTR_ALIGN", Const, 16, ""}, + {"DT_MIPS_FLAGS", Const, 16, ""}, + {"DT_MIPS_GOTSYM", Const, 16, ""}, + {"DT_MIPS_GP_VALUE", Const, 16, ""}, + {"DT_MIPS_HIDDEN_GOTIDX", Const, 16, ""}, + {"DT_MIPS_HIPAGENO", Const, 16, ""}, + 
{"DT_MIPS_ICHECKSUM", Const, 16, ""}, + {"DT_MIPS_INTERFACE", Const, 16, ""}, + {"DT_MIPS_INTERFACE_SIZE", Const, 16, ""}, + {"DT_MIPS_IVERSION", Const, 16, ""}, + {"DT_MIPS_LIBLIST", Const, 16, ""}, + {"DT_MIPS_LIBLISTNO", Const, 16, ""}, + {"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16, ""}, + {"DT_MIPS_LOCAL_GOTIDX", Const, 16, ""}, + {"DT_MIPS_LOCAL_GOTNO", Const, 16, ""}, + {"DT_MIPS_MSYM", Const, 16, ""}, + {"DT_MIPS_OPTIONS", Const, 16, ""}, + {"DT_MIPS_PERF_SUFFIX", Const, 16, ""}, + {"DT_MIPS_PIXIE_INIT", Const, 16, ""}, + {"DT_MIPS_PLTGOT", Const, 16, ""}, + {"DT_MIPS_PROTECTED_GOTIDX", Const, 16, ""}, + {"DT_MIPS_RLD_MAP", Const, 16, ""}, + {"DT_MIPS_RLD_MAP_REL", Const, 16, ""}, + {"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16, ""}, + {"DT_MIPS_RLD_VERSION", Const, 16, ""}, + {"DT_MIPS_RWPLT", Const, 16, ""}, + {"DT_MIPS_SYMBOL_LIB", Const, 16, ""}, + {"DT_MIPS_SYMTABNO", Const, 16, ""}, + {"DT_MIPS_TIME_STAMP", Const, 16, ""}, + {"DT_MIPS_UNREFEXTNO", Const, 16, ""}, + {"DT_MOVEENT", Const, 16, ""}, + {"DT_MOVESZ", Const, 16, ""}, + {"DT_MOVETAB", Const, 16, ""}, + {"DT_NEEDED", Const, 0, ""}, + {"DT_NULL", Const, 0, ""}, + {"DT_PLTGOT", Const, 0, ""}, + {"DT_PLTPAD", Const, 16, ""}, + {"DT_PLTPADSZ", Const, 16, ""}, + {"DT_PLTREL", Const, 0, ""}, + {"DT_PLTRELSZ", Const, 0, ""}, + {"DT_POSFLAG_1", Const, 16, ""}, + {"DT_PPC64_GLINK", Const, 16, ""}, + {"DT_PPC64_OPD", Const, 16, ""}, + {"DT_PPC64_OPDSZ", Const, 16, ""}, + {"DT_PPC64_OPT", Const, 16, ""}, + {"DT_PPC_GOT", Const, 16, ""}, + {"DT_PPC_OPT", Const, 16, ""}, + {"DT_PREINIT_ARRAY", Const, 0, ""}, + {"DT_PREINIT_ARRAYSZ", Const, 0, ""}, + {"DT_REL", Const, 0, ""}, + {"DT_RELA", Const, 0, ""}, + {"DT_RELACOUNT", Const, 16, ""}, + {"DT_RELAENT", Const, 0, ""}, + {"DT_RELASZ", Const, 0, ""}, + {"DT_RELCOUNT", Const, 16, ""}, + {"DT_RELENT", Const, 0, ""}, + {"DT_RELSZ", Const, 0, ""}, + {"DT_RPATH", Const, 0, ""}, + {"DT_RUNPATH", Const, 0, ""}, + {"DT_SONAME", Const, 0, ""}, + {"DT_SPARC_REGISTER", Const, 16, ""}, + {"DT_STRSZ", Const, 0, ""}, + {"DT_STRTAB", Const, 0, ""}, + {"DT_SYMBOLIC", Const, 0, ""}, + {"DT_SYMENT", Const, 0, ""}, + {"DT_SYMINENT", Const, 16, ""}, + {"DT_SYMINFO", Const, 16, ""}, + {"DT_SYMINSZ", Const, 16, ""}, + {"DT_SYMTAB", Const, 0, ""}, + {"DT_SYMTAB_SHNDX", Const, 16, ""}, + {"DT_TEXTREL", Const, 0, ""}, + {"DT_TLSDESC_GOT", Const, 16, ""}, + {"DT_TLSDESC_PLT", Const, 16, ""}, + {"DT_USED", Const, 16, ""}, + {"DT_VALRNGHI", Const, 16, ""}, + {"DT_VALRNGLO", Const, 16, ""}, + {"DT_VERDEF", Const, 16, ""}, + {"DT_VERDEFNUM", Const, 16, ""}, + {"DT_VERNEED", Const, 0, ""}, + {"DT_VERNEEDNUM", Const, 0, ""}, + {"DT_VERSYM", Const, 0, ""}, + {"Data", Type, 0, ""}, + {"Dyn32", Type, 0, ""}, + {"Dyn32.Tag", Field, 0, ""}, + {"Dyn32.Val", Field, 0, ""}, + {"Dyn64", Type, 0, ""}, + {"Dyn64.Tag", Field, 0, ""}, + {"Dyn64.Val", Field, 0, ""}, + {"DynFlag", Type, 0, ""}, + {"DynFlag1", Type, 21, ""}, + {"DynTag", Type, 0, ""}, + {"DynamicVersion", Type, 24, ""}, + {"DynamicVersion.Deps", Field, 24, ""}, + {"DynamicVersion.Flags", Field, 24, ""}, + {"DynamicVersion.Index", Field, 24, ""}, + {"DynamicVersion.Name", Field, 24, ""}, + {"DynamicVersionDep", Type, 24, ""}, + {"DynamicVersionDep.Dep", Field, 24, ""}, + {"DynamicVersionDep.Flags", Field, 24, ""}, + {"DynamicVersionDep.Index", Field, 24, ""}, + {"DynamicVersionFlag", Type, 24, ""}, + {"DynamicVersionNeed", Type, 24, ""}, + {"DynamicVersionNeed.Name", Field, 24, ""}, + {"DynamicVersionNeed.Needs", Field, 24, ""}, + {"EI_ABIVERSION", Const, 0, ""}, + 
{"EI_CLASS", Const, 0, ""}, + {"EI_DATA", Const, 0, ""}, + {"EI_NIDENT", Const, 0, ""}, + {"EI_OSABI", Const, 0, ""}, + {"EI_PAD", Const, 0, ""}, + {"EI_VERSION", Const, 0, ""}, + {"ELFCLASS32", Const, 0, ""}, + {"ELFCLASS64", Const, 0, ""}, + {"ELFCLASSNONE", Const, 0, ""}, + {"ELFDATA2LSB", Const, 0, ""}, + {"ELFDATA2MSB", Const, 0, ""}, + {"ELFDATANONE", Const, 0, ""}, + {"ELFMAG", Const, 0, ""}, + {"ELFOSABI_86OPEN", Const, 0, ""}, + {"ELFOSABI_AIX", Const, 0, ""}, + {"ELFOSABI_ARM", Const, 0, ""}, + {"ELFOSABI_AROS", Const, 11, ""}, + {"ELFOSABI_CLOUDABI", Const, 11, ""}, + {"ELFOSABI_FENIXOS", Const, 11, ""}, + {"ELFOSABI_FREEBSD", Const, 0, ""}, + {"ELFOSABI_HPUX", Const, 0, ""}, + {"ELFOSABI_HURD", Const, 0, ""}, + {"ELFOSABI_IRIX", Const, 0, ""}, + {"ELFOSABI_LINUX", Const, 0, ""}, + {"ELFOSABI_MODESTO", Const, 0, ""}, + {"ELFOSABI_NETBSD", Const, 0, ""}, + {"ELFOSABI_NONE", Const, 0, ""}, + {"ELFOSABI_NSK", Const, 0, ""}, + {"ELFOSABI_OPENBSD", Const, 0, ""}, + {"ELFOSABI_OPENVMS", Const, 0, ""}, + {"ELFOSABI_SOLARIS", Const, 0, ""}, + {"ELFOSABI_STANDALONE", Const, 0, ""}, + {"ELFOSABI_TRU64", Const, 0, ""}, + {"EM_386", Const, 0, ""}, + {"EM_486", Const, 0, ""}, + {"EM_56800EX", Const, 11, ""}, + {"EM_68HC05", Const, 11, ""}, + {"EM_68HC08", Const, 11, ""}, + {"EM_68HC11", Const, 11, ""}, + {"EM_68HC12", Const, 0, ""}, + {"EM_68HC16", Const, 11, ""}, + {"EM_68K", Const, 0, ""}, + {"EM_78KOR", Const, 11, ""}, + {"EM_8051", Const, 11, ""}, + {"EM_860", Const, 0, ""}, + {"EM_88K", Const, 0, ""}, + {"EM_960", Const, 0, ""}, + {"EM_AARCH64", Const, 4, ""}, + {"EM_ALPHA", Const, 0, ""}, + {"EM_ALPHA_STD", Const, 0, ""}, + {"EM_ALTERA_NIOS2", Const, 11, ""}, + {"EM_AMDGPU", Const, 11, ""}, + {"EM_ARC", Const, 0, ""}, + {"EM_ARCA", Const, 11, ""}, + {"EM_ARC_COMPACT", Const, 11, ""}, + {"EM_ARC_COMPACT2", Const, 11, ""}, + {"EM_ARM", Const, 0, ""}, + {"EM_AVR", Const, 11, ""}, + {"EM_AVR32", Const, 11, ""}, + {"EM_BA1", Const, 11, ""}, + {"EM_BA2", Const, 11, ""}, + {"EM_BLACKFIN", Const, 11, ""}, + {"EM_BPF", Const, 11, ""}, + {"EM_C166", Const, 11, ""}, + {"EM_CDP", Const, 11, ""}, + {"EM_CE", Const, 11, ""}, + {"EM_CLOUDSHIELD", Const, 11, ""}, + {"EM_COGE", Const, 11, ""}, + {"EM_COLDFIRE", Const, 0, ""}, + {"EM_COOL", Const, 11, ""}, + {"EM_COREA_1ST", Const, 11, ""}, + {"EM_COREA_2ND", Const, 11, ""}, + {"EM_CR", Const, 11, ""}, + {"EM_CR16", Const, 11, ""}, + {"EM_CRAYNV2", Const, 11, ""}, + {"EM_CRIS", Const, 11, ""}, + {"EM_CRX", Const, 11, ""}, + {"EM_CSR_KALIMBA", Const, 11, ""}, + {"EM_CUDA", Const, 11, ""}, + {"EM_CYPRESS_M8C", Const, 11, ""}, + {"EM_D10V", Const, 11, ""}, + {"EM_D30V", Const, 11, ""}, + {"EM_DSP24", Const, 11, ""}, + {"EM_DSPIC30F", Const, 11, ""}, + {"EM_DXP", Const, 11, ""}, + {"EM_ECOG1", Const, 11, ""}, + {"EM_ECOG16", Const, 11, ""}, + {"EM_ECOG1X", Const, 11, ""}, + {"EM_ECOG2", Const, 11, ""}, + {"EM_ETPU", Const, 11, ""}, + {"EM_EXCESS", Const, 11, ""}, + {"EM_F2MC16", Const, 11, ""}, + {"EM_FIREPATH", Const, 11, ""}, + {"EM_FR20", Const, 0, ""}, + {"EM_FR30", Const, 11, ""}, + {"EM_FT32", Const, 11, ""}, + {"EM_FX66", Const, 11, ""}, + {"EM_H8S", Const, 0, ""}, + {"EM_H8_300", Const, 0, ""}, + {"EM_H8_300H", Const, 0, ""}, + {"EM_H8_500", Const, 0, ""}, + {"EM_HUANY", Const, 11, ""}, + {"EM_IA_64", Const, 0, ""}, + {"EM_INTEL205", Const, 11, ""}, + {"EM_INTEL206", Const, 11, ""}, + {"EM_INTEL207", Const, 11, ""}, + {"EM_INTEL208", Const, 11, ""}, + {"EM_INTEL209", Const, 11, ""}, + {"EM_IP2K", Const, 11, ""}, + {"EM_JAVELIN", Const, 11, ""}, + 
{"EM_K10M", Const, 11, ""}, + {"EM_KM32", Const, 11, ""}, + {"EM_KMX16", Const, 11, ""}, + {"EM_KMX32", Const, 11, ""}, + {"EM_KMX8", Const, 11, ""}, + {"EM_KVARC", Const, 11, ""}, + {"EM_L10M", Const, 11, ""}, + {"EM_LANAI", Const, 11, ""}, + {"EM_LATTICEMICO32", Const, 11, ""}, + {"EM_LOONGARCH", Const, 19, ""}, + {"EM_M16C", Const, 11, ""}, + {"EM_M32", Const, 0, ""}, + {"EM_M32C", Const, 11, ""}, + {"EM_M32R", Const, 11, ""}, + {"EM_MANIK", Const, 11, ""}, + {"EM_MAX", Const, 11, ""}, + {"EM_MAXQ30", Const, 11, ""}, + {"EM_MCHP_PIC", Const, 11, ""}, + {"EM_MCST_ELBRUS", Const, 11, ""}, + {"EM_ME16", Const, 0, ""}, + {"EM_METAG", Const, 11, ""}, + {"EM_MICROBLAZE", Const, 11, ""}, + {"EM_MIPS", Const, 0, ""}, + {"EM_MIPS_RS3_LE", Const, 0, ""}, + {"EM_MIPS_RS4_BE", Const, 0, ""}, + {"EM_MIPS_X", Const, 0, ""}, + {"EM_MMA", Const, 0, ""}, + {"EM_MMDSP_PLUS", Const, 11, ""}, + {"EM_MMIX", Const, 11, ""}, + {"EM_MN10200", Const, 11, ""}, + {"EM_MN10300", Const, 11, ""}, + {"EM_MOXIE", Const, 11, ""}, + {"EM_MSP430", Const, 11, ""}, + {"EM_NCPU", Const, 0, ""}, + {"EM_NDR1", Const, 0, ""}, + {"EM_NDS32", Const, 11, ""}, + {"EM_NONE", Const, 0, ""}, + {"EM_NORC", Const, 11, ""}, + {"EM_NS32K", Const, 11, ""}, + {"EM_OPEN8", Const, 11, ""}, + {"EM_OPENRISC", Const, 11, ""}, + {"EM_PARISC", Const, 0, ""}, + {"EM_PCP", Const, 0, ""}, + {"EM_PDP10", Const, 11, ""}, + {"EM_PDP11", Const, 11, ""}, + {"EM_PDSP", Const, 11, ""}, + {"EM_PJ", Const, 11, ""}, + {"EM_PPC", Const, 0, ""}, + {"EM_PPC64", Const, 0, ""}, + {"EM_PRISM", Const, 11, ""}, + {"EM_QDSP6", Const, 11, ""}, + {"EM_R32C", Const, 11, ""}, + {"EM_RCE", Const, 0, ""}, + {"EM_RH32", Const, 0, ""}, + {"EM_RISCV", Const, 11, ""}, + {"EM_RL78", Const, 11, ""}, + {"EM_RS08", Const, 11, ""}, + {"EM_RX", Const, 11, ""}, + {"EM_S370", Const, 0, ""}, + {"EM_S390", Const, 0, ""}, + {"EM_SCORE7", Const, 11, ""}, + {"EM_SEP", Const, 11, ""}, + {"EM_SE_C17", Const, 11, ""}, + {"EM_SE_C33", Const, 11, ""}, + {"EM_SH", Const, 0, ""}, + {"EM_SHARC", Const, 11, ""}, + {"EM_SLE9X", Const, 11, ""}, + {"EM_SNP1K", Const, 11, ""}, + {"EM_SPARC", Const, 0, ""}, + {"EM_SPARC32PLUS", Const, 0, ""}, + {"EM_SPARCV9", Const, 0, ""}, + {"EM_ST100", Const, 0, ""}, + {"EM_ST19", Const, 11, ""}, + {"EM_ST200", Const, 11, ""}, + {"EM_ST7", Const, 11, ""}, + {"EM_ST9PLUS", Const, 11, ""}, + {"EM_STARCORE", Const, 0, ""}, + {"EM_STM8", Const, 11, ""}, + {"EM_STXP7X", Const, 11, ""}, + {"EM_SVX", Const, 11, ""}, + {"EM_TILE64", Const, 11, ""}, + {"EM_TILEGX", Const, 11, ""}, + {"EM_TILEPRO", Const, 11, ""}, + {"EM_TINYJ", Const, 0, ""}, + {"EM_TI_ARP32", Const, 11, ""}, + {"EM_TI_C2000", Const, 11, ""}, + {"EM_TI_C5500", Const, 11, ""}, + {"EM_TI_C6000", Const, 11, ""}, + {"EM_TI_PRU", Const, 11, ""}, + {"EM_TMM_GPP", Const, 11, ""}, + {"EM_TPC", Const, 11, ""}, + {"EM_TRICORE", Const, 0, ""}, + {"EM_TRIMEDIA", Const, 11, ""}, + {"EM_TSK3000", Const, 11, ""}, + {"EM_UNICORE", Const, 11, ""}, + {"EM_V800", Const, 0, ""}, + {"EM_V850", Const, 11, ""}, + {"EM_VAX", Const, 11, ""}, + {"EM_VIDEOCORE", Const, 11, ""}, + {"EM_VIDEOCORE3", Const, 11, ""}, + {"EM_VIDEOCORE5", Const, 11, ""}, + {"EM_VISIUM", Const, 11, ""}, + {"EM_VPP500", Const, 0, ""}, + {"EM_X86_64", Const, 0, ""}, + {"EM_XCORE", Const, 11, ""}, + {"EM_XGATE", Const, 11, ""}, + {"EM_XIMO16", Const, 11, ""}, + {"EM_XTENSA", Const, 11, ""}, + {"EM_Z80", Const, 11, ""}, + {"EM_ZSP", Const, 11, ""}, + {"ET_CORE", Const, 0, ""}, + {"ET_DYN", Const, 0, ""}, + {"ET_EXEC", Const, 0, ""}, + {"ET_HIOS", Const, 0, ""}, + 
{"ET_HIPROC", Const, 0, ""}, + {"ET_LOOS", Const, 0, ""}, + {"ET_LOPROC", Const, 0, ""}, + {"ET_NONE", Const, 0, ""}, + {"ET_REL", Const, 0, ""}, + {"EV_CURRENT", Const, 0, ""}, + {"EV_NONE", Const, 0, ""}, + {"ErrNoSymbols", Var, 4, ""}, + {"File", Type, 0, ""}, + {"File.FileHeader", Field, 0, ""}, + {"File.Progs", Field, 0, ""}, + {"File.Sections", Field, 0, ""}, + {"FileHeader", Type, 0, ""}, + {"FileHeader.ABIVersion", Field, 0, ""}, + {"FileHeader.ByteOrder", Field, 0, ""}, + {"FileHeader.Class", Field, 0, ""}, + {"FileHeader.Data", Field, 0, ""}, + {"FileHeader.Entry", Field, 1, ""}, + {"FileHeader.Machine", Field, 0, ""}, + {"FileHeader.OSABI", Field, 0, ""}, + {"FileHeader.Type", Field, 0, ""}, + {"FileHeader.Version", Field, 0, ""}, + {"FormatError", Type, 0, ""}, + {"Header32", Type, 0, ""}, + {"Header32.Ehsize", Field, 0, ""}, + {"Header32.Entry", Field, 0, ""}, + {"Header32.Flags", Field, 0, ""}, + {"Header32.Ident", Field, 0, ""}, + {"Header32.Machine", Field, 0, ""}, + {"Header32.Phentsize", Field, 0, ""}, + {"Header32.Phnum", Field, 0, ""}, + {"Header32.Phoff", Field, 0, ""}, + {"Header32.Shentsize", Field, 0, ""}, + {"Header32.Shnum", Field, 0, ""}, + {"Header32.Shoff", Field, 0, ""}, + {"Header32.Shstrndx", Field, 0, ""}, + {"Header32.Type", Field, 0, ""}, + {"Header32.Version", Field, 0, ""}, + {"Header64", Type, 0, ""}, + {"Header64.Ehsize", Field, 0, ""}, + {"Header64.Entry", Field, 0, ""}, + {"Header64.Flags", Field, 0, ""}, + {"Header64.Ident", Field, 0, ""}, + {"Header64.Machine", Field, 0, ""}, + {"Header64.Phentsize", Field, 0, ""}, + {"Header64.Phnum", Field, 0, ""}, + {"Header64.Phoff", Field, 0, ""}, + {"Header64.Shentsize", Field, 0, ""}, + {"Header64.Shnum", Field, 0, ""}, + {"Header64.Shoff", Field, 0, ""}, + {"Header64.Shstrndx", Field, 0, ""}, + {"Header64.Type", Field, 0, ""}, + {"Header64.Version", Field, 0, ""}, + {"ImportedSymbol", Type, 0, ""}, + {"ImportedSymbol.Library", Field, 0, ""}, + {"ImportedSymbol.Name", Field, 0, ""}, + {"ImportedSymbol.Version", Field, 0, ""}, + {"Machine", Type, 0, ""}, + {"NT_FPREGSET", Const, 0, ""}, + {"NT_PRPSINFO", Const, 0, ""}, + {"NT_PRSTATUS", Const, 0, ""}, + {"NType", Type, 0, ""}, + {"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"}, + {"OSABI", Type, 0, ""}, + {"Open", Func, 0, "func(name string) (*File, error)"}, + {"PF_MASKOS", Const, 0, ""}, + {"PF_MASKPROC", Const, 0, ""}, + {"PF_R", Const, 0, ""}, + {"PF_W", Const, 0, ""}, + {"PF_X", Const, 0, ""}, + {"PT_AARCH64_ARCHEXT", Const, 16, ""}, + {"PT_AARCH64_UNWIND", Const, 16, ""}, + {"PT_ARM_ARCHEXT", Const, 16, ""}, + {"PT_ARM_EXIDX", Const, 16, ""}, + {"PT_DYNAMIC", Const, 0, ""}, + {"PT_GNU_EH_FRAME", Const, 16, ""}, + {"PT_GNU_MBIND_HI", Const, 16, ""}, + {"PT_GNU_MBIND_LO", Const, 16, ""}, + {"PT_GNU_PROPERTY", Const, 16, ""}, + {"PT_GNU_RELRO", Const, 16, ""}, + {"PT_GNU_STACK", Const, 16, ""}, + {"PT_HIOS", Const, 0, ""}, + {"PT_HIPROC", Const, 0, ""}, + {"PT_INTERP", Const, 0, ""}, + {"PT_LOAD", Const, 0, ""}, + {"PT_LOOS", Const, 0, ""}, + {"PT_LOPROC", Const, 0, ""}, + {"PT_MIPS_ABIFLAGS", Const, 16, ""}, + {"PT_MIPS_OPTIONS", Const, 16, ""}, + {"PT_MIPS_REGINFO", Const, 16, ""}, + {"PT_MIPS_RTPROC", Const, 16, ""}, + {"PT_NOTE", Const, 0, ""}, + {"PT_NULL", Const, 0, ""}, + {"PT_OPENBSD_BOOTDATA", Const, 16, ""}, + {"PT_OPENBSD_NOBTCFI", Const, 23, ""}, + {"PT_OPENBSD_RANDOMIZE", Const, 16, ""}, + {"PT_OPENBSD_WXNEEDED", Const, 16, ""}, + {"PT_PAX_FLAGS", Const, 16, ""}, + {"PT_PHDR", Const, 0, ""}, + {"PT_RISCV_ATTRIBUTES", Const, 25, ""}, 
+ {"PT_S390_PGSTE", Const, 16, ""}, + {"PT_SHLIB", Const, 0, ""}, + {"PT_SUNWSTACK", Const, 16, ""}, + {"PT_SUNW_EH_FRAME", Const, 16, ""}, + {"PT_TLS", Const, 0, ""}, + {"Prog", Type, 0, ""}, + {"Prog.ProgHeader", Field, 0, ""}, + {"Prog.ReaderAt", Field, 0, ""}, + {"Prog32", Type, 0, ""}, + {"Prog32.Align", Field, 0, ""}, + {"Prog32.Filesz", Field, 0, ""}, + {"Prog32.Flags", Field, 0, ""}, + {"Prog32.Memsz", Field, 0, ""}, + {"Prog32.Off", Field, 0, ""}, + {"Prog32.Paddr", Field, 0, ""}, + {"Prog32.Type", Field, 0, ""}, + {"Prog32.Vaddr", Field, 0, ""}, + {"Prog64", Type, 0, ""}, + {"Prog64.Align", Field, 0, ""}, + {"Prog64.Filesz", Field, 0, ""}, + {"Prog64.Flags", Field, 0, ""}, + {"Prog64.Memsz", Field, 0, ""}, + {"Prog64.Off", Field, 0, ""}, + {"Prog64.Paddr", Field, 0, ""}, + {"Prog64.Type", Field, 0, ""}, + {"Prog64.Vaddr", Field, 0, ""}, + {"ProgFlag", Type, 0, ""}, + {"ProgHeader", Type, 0, ""}, + {"ProgHeader.Align", Field, 0, ""}, + {"ProgHeader.Filesz", Field, 0, ""}, + {"ProgHeader.Flags", Field, 0, ""}, + {"ProgHeader.Memsz", Field, 0, ""}, + {"ProgHeader.Off", Field, 0, ""}, + {"ProgHeader.Paddr", Field, 0, ""}, + {"ProgHeader.Type", Field, 0, ""}, + {"ProgHeader.Vaddr", Field, 0, ""}, + {"ProgType", Type, 0, ""}, + {"R_386", Type, 0, ""}, + {"R_386_16", Const, 10, ""}, + {"R_386_32", Const, 0, ""}, + {"R_386_32PLT", Const, 10, ""}, + {"R_386_8", Const, 10, ""}, + {"R_386_COPY", Const, 0, ""}, + {"R_386_GLOB_DAT", Const, 0, ""}, + {"R_386_GOT32", Const, 0, ""}, + {"R_386_GOT32X", Const, 10, ""}, + {"R_386_GOTOFF", Const, 0, ""}, + {"R_386_GOTPC", Const, 0, ""}, + {"R_386_IRELATIVE", Const, 10, ""}, + {"R_386_JMP_SLOT", Const, 0, ""}, + {"R_386_NONE", Const, 0, ""}, + {"R_386_PC16", Const, 10, ""}, + {"R_386_PC32", Const, 0, ""}, + {"R_386_PC8", Const, 10, ""}, + {"R_386_PLT32", Const, 0, ""}, + {"R_386_RELATIVE", Const, 0, ""}, + {"R_386_SIZE32", Const, 10, ""}, + {"R_386_TLS_DESC", Const, 10, ""}, + {"R_386_TLS_DESC_CALL", Const, 10, ""}, + {"R_386_TLS_DTPMOD32", Const, 0, ""}, + {"R_386_TLS_DTPOFF32", Const, 0, ""}, + {"R_386_TLS_GD", Const, 0, ""}, + {"R_386_TLS_GD_32", Const, 0, ""}, + {"R_386_TLS_GD_CALL", Const, 0, ""}, + {"R_386_TLS_GD_POP", Const, 0, ""}, + {"R_386_TLS_GD_PUSH", Const, 0, ""}, + {"R_386_TLS_GOTDESC", Const, 10, ""}, + {"R_386_TLS_GOTIE", Const, 0, ""}, + {"R_386_TLS_IE", Const, 0, ""}, + {"R_386_TLS_IE_32", Const, 0, ""}, + {"R_386_TLS_LDM", Const, 0, ""}, + {"R_386_TLS_LDM_32", Const, 0, ""}, + {"R_386_TLS_LDM_CALL", Const, 0, ""}, + {"R_386_TLS_LDM_POP", Const, 0, ""}, + {"R_386_TLS_LDM_PUSH", Const, 0, ""}, + {"R_386_TLS_LDO_32", Const, 0, ""}, + {"R_386_TLS_LE", Const, 0, ""}, + {"R_386_TLS_LE_32", Const, 0, ""}, + {"R_386_TLS_TPOFF", Const, 0, ""}, + {"R_386_TLS_TPOFF32", Const, 0, ""}, + {"R_390", Type, 7, ""}, + {"R_390_12", Const, 7, ""}, + {"R_390_16", Const, 7, ""}, + {"R_390_20", Const, 7, ""}, + {"R_390_32", Const, 7, ""}, + {"R_390_64", Const, 7, ""}, + {"R_390_8", Const, 7, ""}, + {"R_390_COPY", Const, 7, ""}, + {"R_390_GLOB_DAT", Const, 7, ""}, + {"R_390_GOT12", Const, 7, ""}, + {"R_390_GOT16", Const, 7, ""}, + {"R_390_GOT20", Const, 7, ""}, + {"R_390_GOT32", Const, 7, ""}, + {"R_390_GOT64", Const, 7, ""}, + {"R_390_GOTENT", Const, 7, ""}, + {"R_390_GOTOFF", Const, 7, ""}, + {"R_390_GOTOFF16", Const, 7, ""}, + {"R_390_GOTOFF64", Const, 7, ""}, + {"R_390_GOTPC", Const, 7, ""}, + {"R_390_GOTPCDBL", Const, 7, ""}, + {"R_390_GOTPLT12", Const, 7, ""}, + {"R_390_GOTPLT16", Const, 7, ""}, + {"R_390_GOTPLT20", Const, 7, ""}, + 
{"R_390_GOTPLT32", Const, 7, ""}, + {"R_390_GOTPLT64", Const, 7, ""}, + {"R_390_GOTPLTENT", Const, 7, ""}, + {"R_390_GOTPLTOFF16", Const, 7, ""}, + {"R_390_GOTPLTOFF32", Const, 7, ""}, + {"R_390_GOTPLTOFF64", Const, 7, ""}, + {"R_390_JMP_SLOT", Const, 7, ""}, + {"R_390_NONE", Const, 7, ""}, + {"R_390_PC16", Const, 7, ""}, + {"R_390_PC16DBL", Const, 7, ""}, + {"R_390_PC32", Const, 7, ""}, + {"R_390_PC32DBL", Const, 7, ""}, + {"R_390_PC64", Const, 7, ""}, + {"R_390_PLT16DBL", Const, 7, ""}, + {"R_390_PLT32", Const, 7, ""}, + {"R_390_PLT32DBL", Const, 7, ""}, + {"R_390_PLT64", Const, 7, ""}, + {"R_390_RELATIVE", Const, 7, ""}, + {"R_390_TLS_DTPMOD", Const, 7, ""}, + {"R_390_TLS_DTPOFF", Const, 7, ""}, + {"R_390_TLS_GD32", Const, 7, ""}, + {"R_390_TLS_GD64", Const, 7, ""}, + {"R_390_TLS_GDCALL", Const, 7, ""}, + {"R_390_TLS_GOTIE12", Const, 7, ""}, + {"R_390_TLS_GOTIE20", Const, 7, ""}, + {"R_390_TLS_GOTIE32", Const, 7, ""}, + {"R_390_TLS_GOTIE64", Const, 7, ""}, + {"R_390_TLS_IE32", Const, 7, ""}, + {"R_390_TLS_IE64", Const, 7, ""}, + {"R_390_TLS_IEENT", Const, 7, ""}, + {"R_390_TLS_LDCALL", Const, 7, ""}, + {"R_390_TLS_LDM32", Const, 7, ""}, + {"R_390_TLS_LDM64", Const, 7, ""}, + {"R_390_TLS_LDO32", Const, 7, ""}, + {"R_390_TLS_LDO64", Const, 7, ""}, + {"R_390_TLS_LE32", Const, 7, ""}, + {"R_390_TLS_LE64", Const, 7, ""}, + {"R_390_TLS_LOAD", Const, 7, ""}, + {"R_390_TLS_TPOFF", Const, 7, ""}, + {"R_AARCH64", Type, 4, ""}, + {"R_AARCH64_ABS16", Const, 4, ""}, + {"R_AARCH64_ABS32", Const, 4, ""}, + {"R_AARCH64_ABS64", Const, 4, ""}, + {"R_AARCH64_ADD_ABS_LO12_NC", Const, 4, ""}, + {"R_AARCH64_ADR_GOT_PAGE", Const, 4, ""}, + {"R_AARCH64_ADR_PREL_LO21", Const, 4, ""}, + {"R_AARCH64_ADR_PREL_PG_HI21", Const, 4, ""}, + {"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4, ""}, + {"R_AARCH64_CALL26", Const, 4, ""}, + {"R_AARCH64_CONDBR19", Const, 4, ""}, + {"R_AARCH64_COPY", Const, 4, ""}, + {"R_AARCH64_GLOB_DAT", Const, 4, ""}, + {"R_AARCH64_GOT_LD_PREL19", Const, 4, ""}, + {"R_AARCH64_IRELATIVE", Const, 4, ""}, + {"R_AARCH64_JUMP26", Const, 4, ""}, + {"R_AARCH64_JUMP_SLOT", Const, 4, ""}, + {"R_AARCH64_LD64_GOTOFF_LO15", Const, 10, ""}, + {"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10, ""}, + {"R_AARCH64_LD64_GOT_LO12_NC", Const, 4, ""}, + {"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4, ""}, + {"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4, ""}, + {"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4, ""}, + {"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4, ""}, + {"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4, ""}, + {"R_AARCH64_LD_PREL_LO19", Const, 4, ""}, + {"R_AARCH64_MOVW_SABS_G0", Const, 4, ""}, + {"R_AARCH64_MOVW_SABS_G1", Const, 4, ""}, + {"R_AARCH64_MOVW_SABS_G2", Const, 4, ""}, + {"R_AARCH64_MOVW_UABS_G0", Const, 4, ""}, + {"R_AARCH64_MOVW_UABS_G0_NC", Const, 4, ""}, + {"R_AARCH64_MOVW_UABS_G1", Const, 4, ""}, + {"R_AARCH64_MOVW_UABS_G1_NC", Const, 4, ""}, + {"R_AARCH64_MOVW_UABS_G2", Const, 4, ""}, + {"R_AARCH64_MOVW_UABS_G2_NC", Const, 4, ""}, + {"R_AARCH64_MOVW_UABS_G3", Const, 4, ""}, + {"R_AARCH64_NONE", Const, 4, ""}, + {"R_AARCH64_NULL", Const, 4, ""}, + {"R_AARCH64_P32_ABS16", Const, 4, ""}, + {"R_AARCH64_P32_ABS32", Const, 4, ""}, + {"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4, ""}, + {"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4, ""}, + {"R_AARCH64_P32_ADR_PREL_LO21", Const, 4, ""}, + {"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4, ""}, + {"R_AARCH64_P32_CALL26", Const, 4, ""}, + {"R_AARCH64_P32_CONDBR19", Const, 4, ""}, + {"R_AARCH64_P32_COPY", Const, 4, ""}, + {"R_AARCH64_P32_GLOB_DAT", Const, 4, ""}, + 
{"R_AARCH64_P32_GOT_LD_PREL19", Const, 4, ""}, + {"R_AARCH64_P32_IRELATIVE", Const, 4, ""}, + {"R_AARCH64_P32_JUMP26", Const, 4, ""}, + {"R_AARCH64_P32_JUMP_SLOT", Const, 4, ""}, + {"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4, ""}, + {"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4, ""}, + {"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4, ""}, + {"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4, ""}, + {"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4, ""}, + {"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4, ""}, + {"R_AARCH64_P32_LD_PREL_LO19", Const, 4, ""}, + {"R_AARCH64_P32_MOVW_SABS_G0", Const, 4, ""}, + {"R_AARCH64_P32_MOVW_UABS_G0", Const, 4, ""}, + {"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4, ""}, + {"R_AARCH64_P32_MOVW_UABS_G1", Const, 4, ""}, + {"R_AARCH64_P32_PREL16", Const, 4, ""}, + {"R_AARCH64_P32_PREL32", Const, 4, ""}, + {"R_AARCH64_P32_RELATIVE", Const, 4, ""}, + {"R_AARCH64_P32_TLSDESC", Const, 4, ""}, + {"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4, ""}, + {"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4, ""}, + {"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4, ""}, + {"R_AARCH64_P32_TLSDESC_CALL", Const, 4, ""}, + {"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4, ""}, + {"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4, ""}, + {"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4, ""}, + {"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4, ""}, + {"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4, ""}, + {"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4, ""}, + {"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4, ""}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4, ""}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4, ""}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4, ""}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4, ""}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4, ""}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4, ""}, + {"R_AARCH64_P32_TLS_DTPMOD", Const, 4, ""}, + {"R_AARCH64_P32_TLS_DTPREL", Const, 4, ""}, + {"R_AARCH64_P32_TLS_TPREL", Const, 4, ""}, + {"R_AARCH64_P32_TSTBR14", Const, 4, ""}, + {"R_AARCH64_PREL16", Const, 4, ""}, + {"R_AARCH64_PREL32", Const, 4, ""}, + {"R_AARCH64_PREL64", Const, 4, ""}, + {"R_AARCH64_RELATIVE", Const, 4, ""}, + {"R_AARCH64_TLSDESC", Const, 4, ""}, + {"R_AARCH64_TLSDESC_ADD", Const, 4, ""}, + {"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4, ""}, + {"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4, ""}, + {"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4, ""}, + {"R_AARCH64_TLSDESC_CALL", Const, 4, ""}, + {"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4, ""}, + {"R_AARCH64_TLSDESC_LDR", Const, 4, ""}, + {"R_AARCH64_TLSDESC_LD_PREL19", Const, 4, ""}, + {"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4, ""}, + {"R_AARCH64_TLSDESC_OFF_G1", Const, 4, ""}, + {"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4, ""}, + {"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4, ""}, + {"R_AARCH64_TLSGD_ADR_PREL21", Const, 10, ""}, + {"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10, ""}, + {"R_AARCH64_TLSGD_MOVW_G1", Const, 10, ""}, + {"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4, ""}, + {"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4, ""}, + {"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4, ""}, + {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4, ""}, + {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4, ""}, + {"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10, ""}, + {"R_AARCH64_TLSLD_ADR_PREL21", Const, 10, ""}, + {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10, ""}, + {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10, ""}, + {"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4, ""}, + {"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4, 
""}, + {"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4, ""}, + {"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10, ""}, + {"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10, ""}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4, ""}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4, ""}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4, ""}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4, ""}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4, ""}, + {"R_AARCH64_TLS_DTPMOD64", Const, 4, ""}, + {"R_AARCH64_TLS_DTPREL64", Const, 4, ""}, + {"R_AARCH64_TLS_TPREL64", Const, 4, ""}, + {"R_AARCH64_TSTBR14", Const, 4, ""}, + {"R_ALPHA", Type, 0, ""}, + {"R_ALPHA_BRADDR", Const, 0, ""}, + {"R_ALPHA_COPY", Const, 0, ""}, + {"R_ALPHA_GLOB_DAT", Const, 0, ""}, + {"R_ALPHA_GPDISP", Const, 0, ""}, + {"R_ALPHA_GPREL32", Const, 0, ""}, + {"R_ALPHA_GPRELHIGH", Const, 0, ""}, + {"R_ALPHA_GPRELLOW", Const, 0, ""}, + {"R_ALPHA_GPVALUE", Const, 0, ""}, + {"R_ALPHA_HINT", Const, 0, ""}, + {"R_ALPHA_IMMED_BR_HI32", Const, 0, ""}, + {"R_ALPHA_IMMED_GP_16", Const, 0, ""}, + {"R_ALPHA_IMMED_GP_HI32", Const, 0, ""}, + {"R_ALPHA_IMMED_LO32", Const, 0, ""}, + {"R_ALPHA_IMMED_SCN_HI32", Const, 0, ""}, + {"R_ALPHA_JMP_SLOT", Const, 0, ""}, + {"R_ALPHA_LITERAL", Const, 0, ""}, + {"R_ALPHA_LITUSE", Const, 0, ""}, + {"R_ALPHA_NONE", Const, 0, ""}, + {"R_ALPHA_OP_PRSHIFT", Const, 0, ""}, + {"R_ALPHA_OP_PSUB", Const, 0, ""}, + {"R_ALPHA_OP_PUSH", Const, 0, ""}, + {"R_ALPHA_OP_STORE", Const, 0, ""}, + {"R_ALPHA_REFLONG", Const, 0, ""}, + {"R_ALPHA_REFQUAD", Const, 0, ""}, + {"R_ALPHA_RELATIVE", Const, 0, ""}, + {"R_ALPHA_SREL16", Const, 0, ""}, + {"R_ALPHA_SREL32", Const, 0, ""}, + {"R_ALPHA_SREL64", Const, 0, ""}, + {"R_ARM", Type, 0, ""}, + {"R_ARM_ABS12", Const, 0, ""}, + {"R_ARM_ABS16", Const, 0, ""}, + {"R_ARM_ABS32", Const, 0, ""}, + {"R_ARM_ABS32_NOI", Const, 10, ""}, + {"R_ARM_ABS8", Const, 0, ""}, + {"R_ARM_ALU_PCREL_15_8", Const, 10, ""}, + {"R_ARM_ALU_PCREL_23_15", Const, 10, ""}, + {"R_ARM_ALU_PCREL_7_0", Const, 10, ""}, + {"R_ARM_ALU_PC_G0", Const, 10, ""}, + {"R_ARM_ALU_PC_G0_NC", Const, 10, ""}, + {"R_ARM_ALU_PC_G1", Const, 10, ""}, + {"R_ARM_ALU_PC_G1_NC", Const, 10, ""}, + {"R_ARM_ALU_PC_G2", Const, 10, ""}, + {"R_ARM_ALU_SBREL_19_12_NC", Const, 10, ""}, + {"R_ARM_ALU_SBREL_27_20_CK", Const, 10, ""}, + {"R_ARM_ALU_SB_G0", Const, 10, ""}, + {"R_ARM_ALU_SB_G0_NC", Const, 10, ""}, + {"R_ARM_ALU_SB_G1", Const, 10, ""}, + {"R_ARM_ALU_SB_G1_NC", Const, 10, ""}, + {"R_ARM_ALU_SB_G2", Const, 10, ""}, + {"R_ARM_AMP_VCALL9", Const, 0, ""}, + {"R_ARM_BASE_ABS", Const, 10, ""}, + {"R_ARM_CALL", Const, 10, ""}, + {"R_ARM_COPY", Const, 0, ""}, + {"R_ARM_GLOB_DAT", Const, 0, ""}, + {"R_ARM_GNU_VTENTRY", Const, 0, ""}, + {"R_ARM_GNU_VTINHERIT", Const, 0, ""}, + {"R_ARM_GOT32", Const, 0, ""}, + {"R_ARM_GOTOFF", Const, 0, ""}, + {"R_ARM_GOTOFF12", Const, 10, ""}, + {"R_ARM_GOTPC", Const, 0, ""}, + {"R_ARM_GOTRELAX", Const, 10, ""}, + {"R_ARM_GOT_ABS", Const, 10, ""}, + {"R_ARM_GOT_BREL12", Const, 10, ""}, + {"R_ARM_GOT_PREL", Const, 10, ""}, + {"R_ARM_IRELATIVE", Const, 10, ""}, + {"R_ARM_JUMP24", Const, 10, ""}, + {"R_ARM_JUMP_SLOT", Const, 0, ""}, + {"R_ARM_LDC_PC_G0", Const, 10, ""}, + {"R_ARM_LDC_PC_G1", Const, 10, ""}, + {"R_ARM_LDC_PC_G2", Const, 10, ""}, + {"R_ARM_LDC_SB_G0", Const, 10, ""}, + {"R_ARM_LDC_SB_G1", Const, 10, ""}, + {"R_ARM_LDC_SB_G2", Const, 10, ""}, + {"R_ARM_LDRS_PC_G0", Const, 10, ""}, + {"R_ARM_LDRS_PC_G1", Const, 10, ""}, + {"R_ARM_LDRS_PC_G2", Const, 10, ""}, + {"R_ARM_LDRS_SB_G0", Const, 10, ""}, + 
{"R_ARM_LDRS_SB_G1", Const, 10, ""}, + {"R_ARM_LDRS_SB_G2", Const, 10, ""}, + {"R_ARM_LDR_PC_G1", Const, 10, ""}, + {"R_ARM_LDR_PC_G2", Const, 10, ""}, + {"R_ARM_LDR_SBREL_11_10_NC", Const, 10, ""}, + {"R_ARM_LDR_SB_G0", Const, 10, ""}, + {"R_ARM_LDR_SB_G1", Const, 10, ""}, + {"R_ARM_LDR_SB_G2", Const, 10, ""}, + {"R_ARM_ME_TOO", Const, 10, ""}, + {"R_ARM_MOVT_ABS", Const, 10, ""}, + {"R_ARM_MOVT_BREL", Const, 10, ""}, + {"R_ARM_MOVT_PREL", Const, 10, ""}, + {"R_ARM_MOVW_ABS_NC", Const, 10, ""}, + {"R_ARM_MOVW_BREL", Const, 10, ""}, + {"R_ARM_MOVW_BREL_NC", Const, 10, ""}, + {"R_ARM_MOVW_PREL_NC", Const, 10, ""}, + {"R_ARM_NONE", Const, 0, ""}, + {"R_ARM_PC13", Const, 0, ""}, + {"R_ARM_PC24", Const, 0, ""}, + {"R_ARM_PLT32", Const, 0, ""}, + {"R_ARM_PLT32_ABS", Const, 10, ""}, + {"R_ARM_PREL31", Const, 10, ""}, + {"R_ARM_PRIVATE_0", Const, 10, ""}, + {"R_ARM_PRIVATE_1", Const, 10, ""}, + {"R_ARM_PRIVATE_10", Const, 10, ""}, + {"R_ARM_PRIVATE_11", Const, 10, ""}, + {"R_ARM_PRIVATE_12", Const, 10, ""}, + {"R_ARM_PRIVATE_13", Const, 10, ""}, + {"R_ARM_PRIVATE_14", Const, 10, ""}, + {"R_ARM_PRIVATE_15", Const, 10, ""}, + {"R_ARM_PRIVATE_2", Const, 10, ""}, + {"R_ARM_PRIVATE_3", Const, 10, ""}, + {"R_ARM_PRIVATE_4", Const, 10, ""}, + {"R_ARM_PRIVATE_5", Const, 10, ""}, + {"R_ARM_PRIVATE_6", Const, 10, ""}, + {"R_ARM_PRIVATE_7", Const, 10, ""}, + {"R_ARM_PRIVATE_8", Const, 10, ""}, + {"R_ARM_PRIVATE_9", Const, 10, ""}, + {"R_ARM_RABS32", Const, 0, ""}, + {"R_ARM_RBASE", Const, 0, ""}, + {"R_ARM_REL32", Const, 0, ""}, + {"R_ARM_REL32_NOI", Const, 10, ""}, + {"R_ARM_RELATIVE", Const, 0, ""}, + {"R_ARM_RPC24", Const, 0, ""}, + {"R_ARM_RREL32", Const, 0, ""}, + {"R_ARM_RSBREL32", Const, 0, ""}, + {"R_ARM_RXPC25", Const, 10, ""}, + {"R_ARM_SBREL31", Const, 10, ""}, + {"R_ARM_SBREL32", Const, 0, ""}, + {"R_ARM_SWI24", Const, 0, ""}, + {"R_ARM_TARGET1", Const, 10, ""}, + {"R_ARM_TARGET2", Const, 10, ""}, + {"R_ARM_THM_ABS5", Const, 0, ""}, + {"R_ARM_THM_ALU_ABS_G0_NC", Const, 10, ""}, + {"R_ARM_THM_ALU_ABS_G1_NC", Const, 10, ""}, + {"R_ARM_THM_ALU_ABS_G2_NC", Const, 10, ""}, + {"R_ARM_THM_ALU_ABS_G3", Const, 10, ""}, + {"R_ARM_THM_ALU_PREL_11_0", Const, 10, ""}, + {"R_ARM_THM_GOT_BREL12", Const, 10, ""}, + {"R_ARM_THM_JUMP11", Const, 10, ""}, + {"R_ARM_THM_JUMP19", Const, 10, ""}, + {"R_ARM_THM_JUMP24", Const, 10, ""}, + {"R_ARM_THM_JUMP6", Const, 10, ""}, + {"R_ARM_THM_JUMP8", Const, 10, ""}, + {"R_ARM_THM_MOVT_ABS", Const, 10, ""}, + {"R_ARM_THM_MOVT_BREL", Const, 10, ""}, + {"R_ARM_THM_MOVT_PREL", Const, 10, ""}, + {"R_ARM_THM_MOVW_ABS_NC", Const, 10, ""}, + {"R_ARM_THM_MOVW_BREL", Const, 10, ""}, + {"R_ARM_THM_MOVW_BREL_NC", Const, 10, ""}, + {"R_ARM_THM_MOVW_PREL_NC", Const, 10, ""}, + {"R_ARM_THM_PC12", Const, 10, ""}, + {"R_ARM_THM_PC22", Const, 0, ""}, + {"R_ARM_THM_PC8", Const, 0, ""}, + {"R_ARM_THM_RPC22", Const, 0, ""}, + {"R_ARM_THM_SWI8", Const, 0, ""}, + {"R_ARM_THM_TLS_CALL", Const, 10, ""}, + {"R_ARM_THM_TLS_DESCSEQ16", Const, 10, ""}, + {"R_ARM_THM_TLS_DESCSEQ32", Const, 10, ""}, + {"R_ARM_THM_XPC22", Const, 0, ""}, + {"R_ARM_TLS_CALL", Const, 10, ""}, + {"R_ARM_TLS_DESCSEQ", Const, 10, ""}, + {"R_ARM_TLS_DTPMOD32", Const, 10, ""}, + {"R_ARM_TLS_DTPOFF32", Const, 10, ""}, + {"R_ARM_TLS_GD32", Const, 10, ""}, + {"R_ARM_TLS_GOTDESC", Const, 10, ""}, + {"R_ARM_TLS_IE12GP", Const, 10, ""}, + {"R_ARM_TLS_IE32", Const, 10, ""}, + {"R_ARM_TLS_LDM32", Const, 10, ""}, + {"R_ARM_TLS_LDO12", Const, 10, ""}, + {"R_ARM_TLS_LDO32", Const, 10, ""}, + {"R_ARM_TLS_LE12", Const, 10, ""}, + 
{"R_ARM_TLS_LE32", Const, 10, ""}, + {"R_ARM_TLS_TPOFF32", Const, 10, ""}, + {"R_ARM_V4BX", Const, 10, ""}, + {"R_ARM_XPC25", Const, 0, ""}, + {"R_INFO", Func, 0, "func(sym uint32, typ uint32) uint64"}, + {"R_INFO32", Func, 0, "func(sym uint32, typ uint32) uint32"}, + {"R_LARCH", Type, 19, ""}, + {"R_LARCH_32", Const, 19, ""}, + {"R_LARCH_32_PCREL", Const, 20, ""}, + {"R_LARCH_64", Const, 19, ""}, + {"R_LARCH_64_PCREL", Const, 22, ""}, + {"R_LARCH_ABS64_HI12", Const, 20, ""}, + {"R_LARCH_ABS64_LO20", Const, 20, ""}, + {"R_LARCH_ABS_HI20", Const, 20, ""}, + {"R_LARCH_ABS_LO12", Const, 20, ""}, + {"R_LARCH_ADD16", Const, 19, ""}, + {"R_LARCH_ADD24", Const, 19, ""}, + {"R_LARCH_ADD32", Const, 19, ""}, + {"R_LARCH_ADD6", Const, 22, ""}, + {"R_LARCH_ADD64", Const, 19, ""}, + {"R_LARCH_ADD8", Const, 19, ""}, + {"R_LARCH_ADD_ULEB128", Const, 22, ""}, + {"R_LARCH_ALIGN", Const, 22, ""}, + {"R_LARCH_B16", Const, 20, ""}, + {"R_LARCH_B21", Const, 20, ""}, + {"R_LARCH_B26", Const, 20, ""}, + {"R_LARCH_CFA", Const, 22, ""}, + {"R_LARCH_COPY", Const, 19, ""}, + {"R_LARCH_DELETE", Const, 22, ""}, + {"R_LARCH_GNU_VTENTRY", Const, 20, ""}, + {"R_LARCH_GNU_VTINHERIT", Const, 20, ""}, + {"R_LARCH_GOT64_HI12", Const, 20, ""}, + {"R_LARCH_GOT64_LO20", Const, 20, ""}, + {"R_LARCH_GOT64_PC_HI12", Const, 20, ""}, + {"R_LARCH_GOT64_PC_LO20", Const, 20, ""}, + {"R_LARCH_GOT_HI20", Const, 20, ""}, + {"R_LARCH_GOT_LO12", Const, 20, ""}, + {"R_LARCH_GOT_PC_HI20", Const, 20, ""}, + {"R_LARCH_GOT_PC_LO12", Const, 20, ""}, + {"R_LARCH_IRELATIVE", Const, 19, ""}, + {"R_LARCH_JUMP_SLOT", Const, 19, ""}, + {"R_LARCH_MARK_LA", Const, 19, ""}, + {"R_LARCH_MARK_PCREL", Const, 19, ""}, + {"R_LARCH_NONE", Const, 19, ""}, + {"R_LARCH_PCALA64_HI12", Const, 20, ""}, + {"R_LARCH_PCALA64_LO20", Const, 20, ""}, + {"R_LARCH_PCALA_HI20", Const, 20, ""}, + {"R_LARCH_PCALA_LO12", Const, 20, ""}, + {"R_LARCH_PCREL20_S2", Const, 22, ""}, + {"R_LARCH_RELATIVE", Const, 19, ""}, + {"R_LARCH_RELAX", Const, 20, ""}, + {"R_LARCH_SOP_ADD", Const, 19, ""}, + {"R_LARCH_SOP_AND", Const, 19, ""}, + {"R_LARCH_SOP_ASSERT", Const, 19, ""}, + {"R_LARCH_SOP_IF_ELSE", Const, 19, ""}, + {"R_LARCH_SOP_NOT", Const, 19, ""}, + {"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19, ""}, + {"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19, ""}, + {"R_LARCH_SOP_POP_32_S_10_12", Const, 19, ""}, + {"R_LARCH_SOP_POP_32_S_10_16", Const, 19, ""}, + {"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19, ""}, + {"R_LARCH_SOP_POP_32_S_10_5", Const, 19, ""}, + {"R_LARCH_SOP_POP_32_S_5_20", Const, 19, ""}, + {"R_LARCH_SOP_POP_32_U", Const, 19, ""}, + {"R_LARCH_SOP_POP_32_U_10_12", Const, 19, ""}, + {"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19, ""}, + {"R_LARCH_SOP_PUSH_DUP", Const, 19, ""}, + {"R_LARCH_SOP_PUSH_GPREL", Const, 19, ""}, + {"R_LARCH_SOP_PUSH_PCREL", Const, 19, ""}, + {"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19, ""}, + {"R_LARCH_SOP_PUSH_TLS_GD", Const, 19, ""}, + {"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19, ""}, + {"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19, ""}, + {"R_LARCH_SOP_SL", Const, 19, ""}, + {"R_LARCH_SOP_SR", Const, 19, ""}, + {"R_LARCH_SOP_SUB", Const, 19, ""}, + {"R_LARCH_SUB16", Const, 19, ""}, + {"R_LARCH_SUB24", Const, 19, ""}, + {"R_LARCH_SUB32", Const, 19, ""}, + {"R_LARCH_SUB6", Const, 22, ""}, + {"R_LARCH_SUB64", Const, 19, ""}, + {"R_LARCH_SUB8", Const, 19, ""}, + {"R_LARCH_SUB_ULEB128", Const, 22, ""}, + {"R_LARCH_TLS_DTPMOD32", Const, 19, ""}, + {"R_LARCH_TLS_DTPMOD64", Const, 19, ""}, + {"R_LARCH_TLS_DTPREL32", Const, 19, ""}, + {"R_LARCH_TLS_DTPREL64", Const, 19, 
""}, + {"R_LARCH_TLS_GD_HI20", Const, 20, ""}, + {"R_LARCH_TLS_GD_PC_HI20", Const, 20, ""}, + {"R_LARCH_TLS_IE64_HI12", Const, 20, ""}, + {"R_LARCH_TLS_IE64_LO20", Const, 20, ""}, + {"R_LARCH_TLS_IE64_PC_HI12", Const, 20, ""}, + {"R_LARCH_TLS_IE64_PC_LO20", Const, 20, ""}, + {"R_LARCH_TLS_IE_HI20", Const, 20, ""}, + {"R_LARCH_TLS_IE_LO12", Const, 20, ""}, + {"R_LARCH_TLS_IE_PC_HI20", Const, 20, ""}, + {"R_LARCH_TLS_IE_PC_LO12", Const, 20, ""}, + {"R_LARCH_TLS_LD_HI20", Const, 20, ""}, + {"R_LARCH_TLS_LD_PC_HI20", Const, 20, ""}, + {"R_LARCH_TLS_LE64_HI12", Const, 20, ""}, + {"R_LARCH_TLS_LE64_LO20", Const, 20, ""}, + {"R_LARCH_TLS_LE_HI20", Const, 20, ""}, + {"R_LARCH_TLS_LE_LO12", Const, 20, ""}, + {"R_LARCH_TLS_TPREL32", Const, 19, ""}, + {"R_LARCH_TLS_TPREL64", Const, 19, ""}, + {"R_MIPS", Type, 6, ""}, + {"R_MIPS_16", Const, 6, ""}, + {"R_MIPS_26", Const, 6, ""}, + {"R_MIPS_32", Const, 6, ""}, + {"R_MIPS_64", Const, 6, ""}, + {"R_MIPS_ADD_IMMEDIATE", Const, 6, ""}, + {"R_MIPS_CALL16", Const, 6, ""}, + {"R_MIPS_CALL_HI16", Const, 6, ""}, + {"R_MIPS_CALL_LO16", Const, 6, ""}, + {"R_MIPS_DELETE", Const, 6, ""}, + {"R_MIPS_GOT16", Const, 6, ""}, + {"R_MIPS_GOT_DISP", Const, 6, ""}, + {"R_MIPS_GOT_HI16", Const, 6, ""}, + {"R_MIPS_GOT_LO16", Const, 6, ""}, + {"R_MIPS_GOT_OFST", Const, 6, ""}, + {"R_MIPS_GOT_PAGE", Const, 6, ""}, + {"R_MIPS_GPREL16", Const, 6, ""}, + {"R_MIPS_GPREL32", Const, 6, ""}, + {"R_MIPS_HI16", Const, 6, ""}, + {"R_MIPS_HIGHER", Const, 6, ""}, + {"R_MIPS_HIGHEST", Const, 6, ""}, + {"R_MIPS_INSERT_A", Const, 6, ""}, + {"R_MIPS_INSERT_B", Const, 6, ""}, + {"R_MIPS_JALR", Const, 6, ""}, + {"R_MIPS_LITERAL", Const, 6, ""}, + {"R_MIPS_LO16", Const, 6, ""}, + {"R_MIPS_NONE", Const, 6, ""}, + {"R_MIPS_PC16", Const, 6, ""}, + {"R_MIPS_PC32", Const, 22, ""}, + {"R_MIPS_PJUMP", Const, 6, ""}, + {"R_MIPS_REL16", Const, 6, ""}, + {"R_MIPS_REL32", Const, 6, ""}, + {"R_MIPS_RELGOT", Const, 6, ""}, + {"R_MIPS_SCN_DISP", Const, 6, ""}, + {"R_MIPS_SHIFT5", Const, 6, ""}, + {"R_MIPS_SHIFT6", Const, 6, ""}, + {"R_MIPS_SUB", Const, 6, ""}, + {"R_MIPS_TLS_DTPMOD32", Const, 6, ""}, + {"R_MIPS_TLS_DTPMOD64", Const, 6, ""}, + {"R_MIPS_TLS_DTPREL32", Const, 6, ""}, + {"R_MIPS_TLS_DTPREL64", Const, 6, ""}, + {"R_MIPS_TLS_DTPREL_HI16", Const, 6, ""}, + {"R_MIPS_TLS_DTPREL_LO16", Const, 6, ""}, + {"R_MIPS_TLS_GD", Const, 6, ""}, + {"R_MIPS_TLS_GOTTPREL", Const, 6, ""}, + {"R_MIPS_TLS_LDM", Const, 6, ""}, + {"R_MIPS_TLS_TPREL32", Const, 6, ""}, + {"R_MIPS_TLS_TPREL64", Const, 6, ""}, + {"R_MIPS_TLS_TPREL_HI16", Const, 6, ""}, + {"R_MIPS_TLS_TPREL_LO16", Const, 6, ""}, + {"R_PPC", Type, 0, ""}, + {"R_PPC64", Type, 5, ""}, + {"R_PPC64_ADDR14", Const, 5, ""}, + {"R_PPC64_ADDR14_BRNTAKEN", Const, 5, ""}, + {"R_PPC64_ADDR14_BRTAKEN", Const, 5, ""}, + {"R_PPC64_ADDR16", Const, 5, ""}, + {"R_PPC64_ADDR16_DS", Const, 5, ""}, + {"R_PPC64_ADDR16_HA", Const, 5, ""}, + {"R_PPC64_ADDR16_HI", Const, 5, ""}, + {"R_PPC64_ADDR16_HIGH", Const, 10, ""}, + {"R_PPC64_ADDR16_HIGHA", Const, 10, ""}, + {"R_PPC64_ADDR16_HIGHER", Const, 5, ""}, + {"R_PPC64_ADDR16_HIGHER34", Const, 20, ""}, + {"R_PPC64_ADDR16_HIGHERA", Const, 5, ""}, + {"R_PPC64_ADDR16_HIGHERA34", Const, 20, ""}, + {"R_PPC64_ADDR16_HIGHEST", Const, 5, ""}, + {"R_PPC64_ADDR16_HIGHEST34", Const, 20, ""}, + {"R_PPC64_ADDR16_HIGHESTA", Const, 5, ""}, + {"R_PPC64_ADDR16_HIGHESTA34", Const, 20, ""}, + {"R_PPC64_ADDR16_LO", Const, 5, ""}, + {"R_PPC64_ADDR16_LO_DS", Const, 5, ""}, + {"R_PPC64_ADDR24", Const, 5, ""}, + {"R_PPC64_ADDR32", Const, 5, ""}, + 
{"R_PPC64_ADDR64", Const, 5, ""}, + {"R_PPC64_ADDR64_LOCAL", Const, 10, ""}, + {"R_PPC64_COPY", Const, 20, ""}, + {"R_PPC64_D28", Const, 20, ""}, + {"R_PPC64_D34", Const, 20, ""}, + {"R_PPC64_D34_HA30", Const, 20, ""}, + {"R_PPC64_D34_HI30", Const, 20, ""}, + {"R_PPC64_D34_LO", Const, 20, ""}, + {"R_PPC64_DTPMOD64", Const, 5, ""}, + {"R_PPC64_DTPREL16", Const, 5, ""}, + {"R_PPC64_DTPREL16_DS", Const, 5, ""}, + {"R_PPC64_DTPREL16_HA", Const, 5, ""}, + {"R_PPC64_DTPREL16_HI", Const, 5, ""}, + {"R_PPC64_DTPREL16_HIGH", Const, 10, ""}, + {"R_PPC64_DTPREL16_HIGHA", Const, 10, ""}, + {"R_PPC64_DTPREL16_HIGHER", Const, 5, ""}, + {"R_PPC64_DTPREL16_HIGHERA", Const, 5, ""}, + {"R_PPC64_DTPREL16_HIGHEST", Const, 5, ""}, + {"R_PPC64_DTPREL16_HIGHESTA", Const, 5, ""}, + {"R_PPC64_DTPREL16_LO", Const, 5, ""}, + {"R_PPC64_DTPREL16_LO_DS", Const, 5, ""}, + {"R_PPC64_DTPREL34", Const, 20, ""}, + {"R_PPC64_DTPREL64", Const, 5, ""}, + {"R_PPC64_ENTRY", Const, 10, ""}, + {"R_PPC64_GLOB_DAT", Const, 20, ""}, + {"R_PPC64_GNU_VTENTRY", Const, 20, ""}, + {"R_PPC64_GNU_VTINHERIT", Const, 20, ""}, + {"R_PPC64_GOT16", Const, 5, ""}, + {"R_PPC64_GOT16_DS", Const, 5, ""}, + {"R_PPC64_GOT16_HA", Const, 5, ""}, + {"R_PPC64_GOT16_HI", Const, 5, ""}, + {"R_PPC64_GOT16_LO", Const, 5, ""}, + {"R_PPC64_GOT16_LO_DS", Const, 5, ""}, + {"R_PPC64_GOT_DTPREL16_DS", Const, 5, ""}, + {"R_PPC64_GOT_DTPREL16_HA", Const, 5, ""}, + {"R_PPC64_GOT_DTPREL16_HI", Const, 5, ""}, + {"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5, ""}, + {"R_PPC64_GOT_DTPREL_PCREL34", Const, 20, ""}, + {"R_PPC64_GOT_PCREL34", Const, 20, ""}, + {"R_PPC64_GOT_TLSGD16", Const, 5, ""}, + {"R_PPC64_GOT_TLSGD16_HA", Const, 5, ""}, + {"R_PPC64_GOT_TLSGD16_HI", Const, 5, ""}, + {"R_PPC64_GOT_TLSGD16_LO", Const, 5, ""}, + {"R_PPC64_GOT_TLSGD_PCREL34", Const, 20, ""}, + {"R_PPC64_GOT_TLSLD16", Const, 5, ""}, + {"R_PPC64_GOT_TLSLD16_HA", Const, 5, ""}, + {"R_PPC64_GOT_TLSLD16_HI", Const, 5, ""}, + {"R_PPC64_GOT_TLSLD16_LO", Const, 5, ""}, + {"R_PPC64_GOT_TLSLD_PCREL34", Const, 20, ""}, + {"R_PPC64_GOT_TPREL16_DS", Const, 5, ""}, + {"R_PPC64_GOT_TPREL16_HA", Const, 5, ""}, + {"R_PPC64_GOT_TPREL16_HI", Const, 5, ""}, + {"R_PPC64_GOT_TPREL16_LO_DS", Const, 5, ""}, + {"R_PPC64_GOT_TPREL_PCREL34", Const, 20, ""}, + {"R_PPC64_IRELATIVE", Const, 10, ""}, + {"R_PPC64_JMP_IREL", Const, 10, ""}, + {"R_PPC64_JMP_SLOT", Const, 5, ""}, + {"R_PPC64_NONE", Const, 5, ""}, + {"R_PPC64_PCREL28", Const, 20, ""}, + {"R_PPC64_PCREL34", Const, 20, ""}, + {"R_PPC64_PCREL_OPT", Const, 20, ""}, + {"R_PPC64_PLT16_HA", Const, 20, ""}, + {"R_PPC64_PLT16_HI", Const, 20, ""}, + {"R_PPC64_PLT16_LO", Const, 20, ""}, + {"R_PPC64_PLT16_LO_DS", Const, 10, ""}, + {"R_PPC64_PLT32", Const, 20, ""}, + {"R_PPC64_PLT64", Const, 20, ""}, + {"R_PPC64_PLTCALL", Const, 20, ""}, + {"R_PPC64_PLTCALL_NOTOC", Const, 20, ""}, + {"R_PPC64_PLTGOT16", Const, 10, ""}, + {"R_PPC64_PLTGOT16_DS", Const, 10, ""}, + {"R_PPC64_PLTGOT16_HA", Const, 10, ""}, + {"R_PPC64_PLTGOT16_HI", Const, 10, ""}, + {"R_PPC64_PLTGOT16_LO", Const, 10, ""}, + {"R_PPC64_PLTGOT_LO_DS", Const, 10, ""}, + {"R_PPC64_PLTREL32", Const, 20, ""}, + {"R_PPC64_PLTREL64", Const, 20, ""}, + {"R_PPC64_PLTSEQ", Const, 20, ""}, + {"R_PPC64_PLTSEQ_NOTOC", Const, 20, ""}, + {"R_PPC64_PLT_PCREL34", Const, 20, ""}, + {"R_PPC64_PLT_PCREL34_NOTOC", Const, 20, ""}, + {"R_PPC64_REL14", Const, 5, ""}, + {"R_PPC64_REL14_BRNTAKEN", Const, 5, ""}, + {"R_PPC64_REL14_BRTAKEN", Const, 5, ""}, + {"R_PPC64_REL16", Const, 5, ""}, + {"R_PPC64_REL16DX_HA", Const, 10, ""}, + 
{"R_PPC64_REL16_HA", Const, 5, ""}, + {"R_PPC64_REL16_HI", Const, 5, ""}, + {"R_PPC64_REL16_HIGH", Const, 20, ""}, + {"R_PPC64_REL16_HIGHA", Const, 20, ""}, + {"R_PPC64_REL16_HIGHER", Const, 20, ""}, + {"R_PPC64_REL16_HIGHER34", Const, 20, ""}, + {"R_PPC64_REL16_HIGHERA", Const, 20, ""}, + {"R_PPC64_REL16_HIGHERA34", Const, 20, ""}, + {"R_PPC64_REL16_HIGHEST", Const, 20, ""}, + {"R_PPC64_REL16_HIGHEST34", Const, 20, ""}, + {"R_PPC64_REL16_HIGHESTA", Const, 20, ""}, + {"R_PPC64_REL16_HIGHESTA34", Const, 20, ""}, + {"R_PPC64_REL16_LO", Const, 5, ""}, + {"R_PPC64_REL24", Const, 5, ""}, + {"R_PPC64_REL24_NOTOC", Const, 10, ""}, + {"R_PPC64_REL24_P9NOTOC", Const, 21, ""}, + {"R_PPC64_REL30", Const, 20, ""}, + {"R_PPC64_REL32", Const, 5, ""}, + {"R_PPC64_REL64", Const, 5, ""}, + {"R_PPC64_RELATIVE", Const, 18, ""}, + {"R_PPC64_SECTOFF", Const, 20, ""}, + {"R_PPC64_SECTOFF_DS", Const, 10, ""}, + {"R_PPC64_SECTOFF_HA", Const, 20, ""}, + {"R_PPC64_SECTOFF_HI", Const, 20, ""}, + {"R_PPC64_SECTOFF_LO", Const, 20, ""}, + {"R_PPC64_SECTOFF_LO_DS", Const, 10, ""}, + {"R_PPC64_TLS", Const, 5, ""}, + {"R_PPC64_TLSGD", Const, 5, ""}, + {"R_PPC64_TLSLD", Const, 5, ""}, + {"R_PPC64_TOC", Const, 5, ""}, + {"R_PPC64_TOC16", Const, 5, ""}, + {"R_PPC64_TOC16_DS", Const, 5, ""}, + {"R_PPC64_TOC16_HA", Const, 5, ""}, + {"R_PPC64_TOC16_HI", Const, 5, ""}, + {"R_PPC64_TOC16_LO", Const, 5, ""}, + {"R_PPC64_TOC16_LO_DS", Const, 5, ""}, + {"R_PPC64_TOCSAVE", Const, 10, ""}, + {"R_PPC64_TPREL16", Const, 5, ""}, + {"R_PPC64_TPREL16_DS", Const, 5, ""}, + {"R_PPC64_TPREL16_HA", Const, 5, ""}, + {"R_PPC64_TPREL16_HI", Const, 5, ""}, + {"R_PPC64_TPREL16_HIGH", Const, 10, ""}, + {"R_PPC64_TPREL16_HIGHA", Const, 10, ""}, + {"R_PPC64_TPREL16_HIGHER", Const, 5, ""}, + {"R_PPC64_TPREL16_HIGHERA", Const, 5, ""}, + {"R_PPC64_TPREL16_HIGHEST", Const, 5, ""}, + {"R_PPC64_TPREL16_HIGHESTA", Const, 5, ""}, + {"R_PPC64_TPREL16_LO", Const, 5, ""}, + {"R_PPC64_TPREL16_LO_DS", Const, 5, ""}, + {"R_PPC64_TPREL34", Const, 20, ""}, + {"R_PPC64_TPREL64", Const, 5, ""}, + {"R_PPC64_UADDR16", Const, 20, ""}, + {"R_PPC64_UADDR32", Const, 20, ""}, + {"R_PPC64_UADDR64", Const, 20, ""}, + {"R_PPC_ADDR14", Const, 0, ""}, + {"R_PPC_ADDR14_BRNTAKEN", Const, 0, ""}, + {"R_PPC_ADDR14_BRTAKEN", Const, 0, ""}, + {"R_PPC_ADDR16", Const, 0, ""}, + {"R_PPC_ADDR16_HA", Const, 0, ""}, + {"R_PPC_ADDR16_HI", Const, 0, ""}, + {"R_PPC_ADDR16_LO", Const, 0, ""}, + {"R_PPC_ADDR24", Const, 0, ""}, + {"R_PPC_ADDR32", Const, 0, ""}, + {"R_PPC_COPY", Const, 0, ""}, + {"R_PPC_DTPMOD32", Const, 0, ""}, + {"R_PPC_DTPREL16", Const, 0, ""}, + {"R_PPC_DTPREL16_HA", Const, 0, ""}, + {"R_PPC_DTPREL16_HI", Const, 0, ""}, + {"R_PPC_DTPREL16_LO", Const, 0, ""}, + {"R_PPC_DTPREL32", Const, 0, ""}, + {"R_PPC_EMB_BIT_FLD", Const, 0, ""}, + {"R_PPC_EMB_MRKREF", Const, 0, ""}, + {"R_PPC_EMB_NADDR16", Const, 0, ""}, + {"R_PPC_EMB_NADDR16_HA", Const, 0, ""}, + {"R_PPC_EMB_NADDR16_HI", Const, 0, ""}, + {"R_PPC_EMB_NADDR16_LO", Const, 0, ""}, + {"R_PPC_EMB_NADDR32", Const, 0, ""}, + {"R_PPC_EMB_RELSDA", Const, 0, ""}, + {"R_PPC_EMB_RELSEC16", Const, 0, ""}, + {"R_PPC_EMB_RELST_HA", Const, 0, ""}, + {"R_PPC_EMB_RELST_HI", Const, 0, ""}, + {"R_PPC_EMB_RELST_LO", Const, 0, ""}, + {"R_PPC_EMB_SDA21", Const, 0, ""}, + {"R_PPC_EMB_SDA2I16", Const, 0, ""}, + {"R_PPC_EMB_SDA2REL", Const, 0, ""}, + {"R_PPC_EMB_SDAI16", Const, 0, ""}, + {"R_PPC_GLOB_DAT", Const, 0, ""}, + {"R_PPC_GOT16", Const, 0, ""}, + {"R_PPC_GOT16_HA", Const, 0, ""}, + {"R_PPC_GOT16_HI", Const, 0, ""}, + {"R_PPC_GOT16_LO", Const, 
0, ""}, + {"R_PPC_GOT_TLSGD16", Const, 0, ""}, + {"R_PPC_GOT_TLSGD16_HA", Const, 0, ""}, + {"R_PPC_GOT_TLSGD16_HI", Const, 0, ""}, + {"R_PPC_GOT_TLSGD16_LO", Const, 0, ""}, + {"R_PPC_GOT_TLSLD16", Const, 0, ""}, + {"R_PPC_GOT_TLSLD16_HA", Const, 0, ""}, + {"R_PPC_GOT_TLSLD16_HI", Const, 0, ""}, + {"R_PPC_GOT_TLSLD16_LO", Const, 0, ""}, + {"R_PPC_GOT_TPREL16", Const, 0, ""}, + {"R_PPC_GOT_TPREL16_HA", Const, 0, ""}, + {"R_PPC_GOT_TPREL16_HI", Const, 0, ""}, + {"R_PPC_GOT_TPREL16_LO", Const, 0, ""}, + {"R_PPC_JMP_SLOT", Const, 0, ""}, + {"R_PPC_LOCAL24PC", Const, 0, ""}, + {"R_PPC_NONE", Const, 0, ""}, + {"R_PPC_PLT16_HA", Const, 0, ""}, + {"R_PPC_PLT16_HI", Const, 0, ""}, + {"R_PPC_PLT16_LO", Const, 0, ""}, + {"R_PPC_PLT32", Const, 0, ""}, + {"R_PPC_PLTREL24", Const, 0, ""}, + {"R_PPC_PLTREL32", Const, 0, ""}, + {"R_PPC_REL14", Const, 0, ""}, + {"R_PPC_REL14_BRNTAKEN", Const, 0, ""}, + {"R_PPC_REL14_BRTAKEN", Const, 0, ""}, + {"R_PPC_REL24", Const, 0, ""}, + {"R_PPC_REL32", Const, 0, ""}, + {"R_PPC_RELATIVE", Const, 0, ""}, + {"R_PPC_SDAREL16", Const, 0, ""}, + {"R_PPC_SECTOFF", Const, 0, ""}, + {"R_PPC_SECTOFF_HA", Const, 0, ""}, + {"R_PPC_SECTOFF_HI", Const, 0, ""}, + {"R_PPC_SECTOFF_LO", Const, 0, ""}, + {"R_PPC_TLS", Const, 0, ""}, + {"R_PPC_TPREL16", Const, 0, ""}, + {"R_PPC_TPREL16_HA", Const, 0, ""}, + {"R_PPC_TPREL16_HI", Const, 0, ""}, + {"R_PPC_TPREL16_LO", Const, 0, ""}, + {"R_PPC_TPREL32", Const, 0, ""}, + {"R_PPC_UADDR16", Const, 0, ""}, + {"R_PPC_UADDR32", Const, 0, ""}, + {"R_RISCV", Type, 11, ""}, + {"R_RISCV_32", Const, 11, ""}, + {"R_RISCV_32_PCREL", Const, 12, ""}, + {"R_RISCV_64", Const, 11, ""}, + {"R_RISCV_ADD16", Const, 11, ""}, + {"R_RISCV_ADD32", Const, 11, ""}, + {"R_RISCV_ADD64", Const, 11, ""}, + {"R_RISCV_ADD8", Const, 11, ""}, + {"R_RISCV_ALIGN", Const, 11, ""}, + {"R_RISCV_BRANCH", Const, 11, ""}, + {"R_RISCV_CALL", Const, 11, ""}, + {"R_RISCV_CALL_PLT", Const, 11, ""}, + {"R_RISCV_COPY", Const, 11, ""}, + {"R_RISCV_GNU_VTENTRY", Const, 11, ""}, + {"R_RISCV_GNU_VTINHERIT", Const, 11, ""}, + {"R_RISCV_GOT_HI20", Const, 11, ""}, + {"R_RISCV_GPREL_I", Const, 11, ""}, + {"R_RISCV_GPREL_S", Const, 11, ""}, + {"R_RISCV_HI20", Const, 11, ""}, + {"R_RISCV_JAL", Const, 11, ""}, + {"R_RISCV_JUMP_SLOT", Const, 11, ""}, + {"R_RISCV_LO12_I", Const, 11, ""}, + {"R_RISCV_LO12_S", Const, 11, ""}, + {"R_RISCV_NONE", Const, 11, ""}, + {"R_RISCV_PCREL_HI20", Const, 11, ""}, + {"R_RISCV_PCREL_LO12_I", Const, 11, ""}, + {"R_RISCV_PCREL_LO12_S", Const, 11, ""}, + {"R_RISCV_RELATIVE", Const, 11, ""}, + {"R_RISCV_RELAX", Const, 11, ""}, + {"R_RISCV_RVC_BRANCH", Const, 11, ""}, + {"R_RISCV_RVC_JUMP", Const, 11, ""}, + {"R_RISCV_RVC_LUI", Const, 11, ""}, + {"R_RISCV_SET16", Const, 11, ""}, + {"R_RISCV_SET32", Const, 11, ""}, + {"R_RISCV_SET6", Const, 11, ""}, + {"R_RISCV_SET8", Const, 11, ""}, + {"R_RISCV_SUB16", Const, 11, ""}, + {"R_RISCV_SUB32", Const, 11, ""}, + {"R_RISCV_SUB6", Const, 11, ""}, + {"R_RISCV_SUB64", Const, 11, ""}, + {"R_RISCV_SUB8", Const, 11, ""}, + {"R_RISCV_TLS_DTPMOD32", Const, 11, ""}, + {"R_RISCV_TLS_DTPMOD64", Const, 11, ""}, + {"R_RISCV_TLS_DTPREL32", Const, 11, ""}, + {"R_RISCV_TLS_DTPREL64", Const, 11, ""}, + {"R_RISCV_TLS_GD_HI20", Const, 11, ""}, + {"R_RISCV_TLS_GOT_HI20", Const, 11, ""}, + {"R_RISCV_TLS_TPREL32", Const, 11, ""}, + {"R_RISCV_TLS_TPREL64", Const, 11, ""}, + {"R_RISCV_TPREL_ADD", Const, 11, ""}, + {"R_RISCV_TPREL_HI20", Const, 11, ""}, + {"R_RISCV_TPREL_I", Const, 11, ""}, + {"R_RISCV_TPREL_LO12_I", Const, 11, ""}, + 
{"R_RISCV_TPREL_LO12_S", Const, 11, ""}, + {"R_RISCV_TPREL_S", Const, 11, ""}, + {"R_SPARC", Type, 0, ""}, + {"R_SPARC_10", Const, 0, ""}, + {"R_SPARC_11", Const, 0, ""}, + {"R_SPARC_13", Const, 0, ""}, + {"R_SPARC_16", Const, 0, ""}, + {"R_SPARC_22", Const, 0, ""}, + {"R_SPARC_32", Const, 0, ""}, + {"R_SPARC_5", Const, 0, ""}, + {"R_SPARC_6", Const, 0, ""}, + {"R_SPARC_64", Const, 0, ""}, + {"R_SPARC_7", Const, 0, ""}, + {"R_SPARC_8", Const, 0, ""}, + {"R_SPARC_COPY", Const, 0, ""}, + {"R_SPARC_DISP16", Const, 0, ""}, + {"R_SPARC_DISP32", Const, 0, ""}, + {"R_SPARC_DISP64", Const, 0, ""}, + {"R_SPARC_DISP8", Const, 0, ""}, + {"R_SPARC_GLOB_DAT", Const, 0, ""}, + {"R_SPARC_GLOB_JMP", Const, 0, ""}, + {"R_SPARC_GOT10", Const, 0, ""}, + {"R_SPARC_GOT13", Const, 0, ""}, + {"R_SPARC_GOT22", Const, 0, ""}, + {"R_SPARC_H44", Const, 0, ""}, + {"R_SPARC_HH22", Const, 0, ""}, + {"R_SPARC_HI22", Const, 0, ""}, + {"R_SPARC_HIPLT22", Const, 0, ""}, + {"R_SPARC_HIX22", Const, 0, ""}, + {"R_SPARC_HM10", Const, 0, ""}, + {"R_SPARC_JMP_SLOT", Const, 0, ""}, + {"R_SPARC_L44", Const, 0, ""}, + {"R_SPARC_LM22", Const, 0, ""}, + {"R_SPARC_LO10", Const, 0, ""}, + {"R_SPARC_LOPLT10", Const, 0, ""}, + {"R_SPARC_LOX10", Const, 0, ""}, + {"R_SPARC_M44", Const, 0, ""}, + {"R_SPARC_NONE", Const, 0, ""}, + {"R_SPARC_OLO10", Const, 0, ""}, + {"R_SPARC_PC10", Const, 0, ""}, + {"R_SPARC_PC22", Const, 0, ""}, + {"R_SPARC_PCPLT10", Const, 0, ""}, + {"R_SPARC_PCPLT22", Const, 0, ""}, + {"R_SPARC_PCPLT32", Const, 0, ""}, + {"R_SPARC_PC_HH22", Const, 0, ""}, + {"R_SPARC_PC_HM10", Const, 0, ""}, + {"R_SPARC_PC_LM22", Const, 0, ""}, + {"R_SPARC_PLT32", Const, 0, ""}, + {"R_SPARC_PLT64", Const, 0, ""}, + {"R_SPARC_REGISTER", Const, 0, ""}, + {"R_SPARC_RELATIVE", Const, 0, ""}, + {"R_SPARC_UA16", Const, 0, ""}, + {"R_SPARC_UA32", Const, 0, ""}, + {"R_SPARC_UA64", Const, 0, ""}, + {"R_SPARC_WDISP16", Const, 0, ""}, + {"R_SPARC_WDISP19", Const, 0, ""}, + {"R_SPARC_WDISP22", Const, 0, ""}, + {"R_SPARC_WDISP30", Const, 0, ""}, + {"R_SPARC_WPLT30", Const, 0, ""}, + {"R_SYM32", Func, 0, "func(info uint32) uint32"}, + {"R_SYM64", Func, 0, "func(info uint64) uint32"}, + {"R_TYPE32", Func, 0, "func(info uint32) uint32"}, + {"R_TYPE64", Func, 0, "func(info uint64) uint32"}, + {"R_X86_64", Type, 0, ""}, + {"R_X86_64_16", Const, 0, ""}, + {"R_X86_64_32", Const, 0, ""}, + {"R_X86_64_32S", Const, 0, ""}, + {"R_X86_64_64", Const, 0, ""}, + {"R_X86_64_8", Const, 0, ""}, + {"R_X86_64_COPY", Const, 0, ""}, + {"R_X86_64_DTPMOD64", Const, 0, ""}, + {"R_X86_64_DTPOFF32", Const, 0, ""}, + {"R_X86_64_DTPOFF64", Const, 0, ""}, + {"R_X86_64_GLOB_DAT", Const, 0, ""}, + {"R_X86_64_GOT32", Const, 0, ""}, + {"R_X86_64_GOT64", Const, 10, ""}, + {"R_X86_64_GOTOFF64", Const, 10, ""}, + {"R_X86_64_GOTPC32", Const, 10, ""}, + {"R_X86_64_GOTPC32_TLSDESC", Const, 10, ""}, + {"R_X86_64_GOTPC64", Const, 10, ""}, + {"R_X86_64_GOTPCREL", Const, 0, ""}, + {"R_X86_64_GOTPCREL64", Const, 10, ""}, + {"R_X86_64_GOTPCRELX", Const, 10, ""}, + {"R_X86_64_GOTPLT64", Const, 10, ""}, + {"R_X86_64_GOTTPOFF", Const, 0, ""}, + {"R_X86_64_IRELATIVE", Const, 10, ""}, + {"R_X86_64_JMP_SLOT", Const, 0, ""}, + {"R_X86_64_NONE", Const, 0, ""}, + {"R_X86_64_PC16", Const, 0, ""}, + {"R_X86_64_PC32", Const, 0, ""}, + {"R_X86_64_PC32_BND", Const, 10, ""}, + {"R_X86_64_PC64", Const, 10, ""}, + {"R_X86_64_PC8", Const, 0, ""}, + {"R_X86_64_PLT32", Const, 0, ""}, + {"R_X86_64_PLT32_BND", Const, 10, ""}, + {"R_X86_64_PLTOFF64", Const, 10, ""}, + {"R_X86_64_RELATIVE", Const, 0, ""}, + 
{"R_X86_64_RELATIVE64", Const, 10, ""}, + {"R_X86_64_REX_GOTPCRELX", Const, 10, ""}, + {"R_X86_64_SIZE32", Const, 10, ""}, + {"R_X86_64_SIZE64", Const, 10, ""}, + {"R_X86_64_TLSDESC", Const, 10, ""}, + {"R_X86_64_TLSDESC_CALL", Const, 10, ""}, + {"R_X86_64_TLSGD", Const, 0, ""}, + {"R_X86_64_TLSLD", Const, 0, ""}, + {"R_X86_64_TPOFF32", Const, 0, ""}, + {"R_X86_64_TPOFF64", Const, 0, ""}, + {"Rel32", Type, 0, ""}, + {"Rel32.Info", Field, 0, ""}, + {"Rel32.Off", Field, 0, ""}, + {"Rel64", Type, 0, ""}, + {"Rel64.Info", Field, 0, ""}, + {"Rel64.Off", Field, 0, ""}, + {"Rela32", Type, 0, ""}, + {"Rela32.Addend", Field, 0, ""}, + {"Rela32.Info", Field, 0, ""}, + {"Rela32.Off", Field, 0, ""}, + {"Rela64", Type, 0, ""}, + {"Rela64.Addend", Field, 0, ""}, + {"Rela64.Info", Field, 0, ""}, + {"Rela64.Off", Field, 0, ""}, + {"SHF_ALLOC", Const, 0, ""}, + {"SHF_COMPRESSED", Const, 6, ""}, + {"SHF_EXECINSTR", Const, 0, ""}, + {"SHF_GROUP", Const, 0, ""}, + {"SHF_INFO_LINK", Const, 0, ""}, + {"SHF_LINK_ORDER", Const, 0, ""}, + {"SHF_MASKOS", Const, 0, ""}, + {"SHF_MASKPROC", Const, 0, ""}, + {"SHF_MERGE", Const, 0, ""}, + {"SHF_OS_NONCONFORMING", Const, 0, ""}, + {"SHF_STRINGS", Const, 0, ""}, + {"SHF_TLS", Const, 0, ""}, + {"SHF_WRITE", Const, 0, ""}, + {"SHN_ABS", Const, 0, ""}, + {"SHN_COMMON", Const, 0, ""}, + {"SHN_HIOS", Const, 0, ""}, + {"SHN_HIPROC", Const, 0, ""}, + {"SHN_HIRESERVE", Const, 0, ""}, + {"SHN_LOOS", Const, 0, ""}, + {"SHN_LOPROC", Const, 0, ""}, + {"SHN_LORESERVE", Const, 0, ""}, + {"SHN_UNDEF", Const, 0, ""}, + {"SHN_XINDEX", Const, 0, ""}, + {"SHT_DYNAMIC", Const, 0, ""}, + {"SHT_DYNSYM", Const, 0, ""}, + {"SHT_FINI_ARRAY", Const, 0, ""}, + {"SHT_GNU_ATTRIBUTES", Const, 0, ""}, + {"SHT_GNU_HASH", Const, 0, ""}, + {"SHT_GNU_LIBLIST", Const, 0, ""}, + {"SHT_GNU_VERDEF", Const, 0, ""}, + {"SHT_GNU_VERNEED", Const, 0, ""}, + {"SHT_GNU_VERSYM", Const, 0, ""}, + {"SHT_GROUP", Const, 0, ""}, + {"SHT_HASH", Const, 0, ""}, + {"SHT_HIOS", Const, 0, ""}, + {"SHT_HIPROC", Const, 0, ""}, + {"SHT_HIUSER", Const, 0, ""}, + {"SHT_INIT_ARRAY", Const, 0, ""}, + {"SHT_LOOS", Const, 0, ""}, + {"SHT_LOPROC", Const, 0, ""}, + {"SHT_LOUSER", Const, 0, ""}, + {"SHT_MIPS_ABIFLAGS", Const, 17, ""}, + {"SHT_NOBITS", Const, 0, ""}, + {"SHT_NOTE", Const, 0, ""}, + {"SHT_NULL", Const, 0, ""}, + {"SHT_PREINIT_ARRAY", Const, 0, ""}, + {"SHT_PROGBITS", Const, 0, ""}, + {"SHT_REL", Const, 0, ""}, + {"SHT_RELA", Const, 0, ""}, + {"SHT_RISCV_ATTRIBUTES", Const, 25, ""}, + {"SHT_SHLIB", Const, 0, ""}, + {"SHT_STRTAB", Const, 0, ""}, + {"SHT_SYMTAB", Const, 0, ""}, + {"SHT_SYMTAB_SHNDX", Const, 0, ""}, + {"STB_GLOBAL", Const, 0, ""}, + {"STB_HIOS", Const, 0, ""}, + {"STB_HIPROC", Const, 0, ""}, + {"STB_LOCAL", Const, 0, ""}, + {"STB_LOOS", Const, 0, ""}, + {"STB_LOPROC", Const, 0, ""}, + {"STB_WEAK", Const, 0, ""}, + {"STT_COMMON", Const, 0, ""}, + {"STT_FILE", Const, 0, ""}, + {"STT_FUNC", Const, 0, ""}, + {"STT_GNU_IFUNC", Const, 23, ""}, + {"STT_HIOS", Const, 0, ""}, + {"STT_HIPROC", Const, 0, ""}, + {"STT_LOOS", Const, 0, ""}, + {"STT_LOPROC", Const, 0, ""}, + {"STT_NOTYPE", Const, 0, ""}, + {"STT_OBJECT", Const, 0, ""}, + {"STT_RELC", Const, 23, ""}, + {"STT_SECTION", Const, 0, ""}, + {"STT_SRELC", Const, 23, ""}, + {"STT_TLS", Const, 0, ""}, + {"STV_DEFAULT", Const, 0, ""}, + {"STV_HIDDEN", Const, 0, ""}, + {"STV_INTERNAL", Const, 0, ""}, + {"STV_PROTECTED", Const, 0, ""}, + {"ST_BIND", Func, 0, "func(info uint8) SymBind"}, + {"ST_INFO", Func, 0, "func(bind SymBind, typ SymType) uint8"}, + {"ST_TYPE", Func, 
0, "func(info uint8) SymType"}, + {"ST_VISIBILITY", Func, 0, "func(other uint8) SymVis"}, + {"Section", Type, 0, ""}, + {"Section.ReaderAt", Field, 0, ""}, + {"Section.SectionHeader", Field, 0, ""}, + {"Section32", Type, 0, ""}, + {"Section32.Addr", Field, 0, ""}, + {"Section32.Addralign", Field, 0, ""}, + {"Section32.Entsize", Field, 0, ""}, + {"Section32.Flags", Field, 0, ""}, + {"Section32.Info", Field, 0, ""}, + {"Section32.Link", Field, 0, ""}, + {"Section32.Name", Field, 0, ""}, + {"Section32.Off", Field, 0, ""}, + {"Section32.Size", Field, 0, ""}, + {"Section32.Type", Field, 0, ""}, + {"Section64", Type, 0, ""}, + {"Section64.Addr", Field, 0, ""}, + {"Section64.Addralign", Field, 0, ""}, + {"Section64.Entsize", Field, 0, ""}, + {"Section64.Flags", Field, 0, ""}, + {"Section64.Info", Field, 0, ""}, + {"Section64.Link", Field, 0, ""}, + {"Section64.Name", Field, 0, ""}, + {"Section64.Off", Field, 0, ""}, + {"Section64.Size", Field, 0, ""}, + {"Section64.Type", Field, 0, ""}, + {"SectionFlag", Type, 0, ""}, + {"SectionHeader", Type, 0, ""}, + {"SectionHeader.Addr", Field, 0, ""}, + {"SectionHeader.Addralign", Field, 0, ""}, + {"SectionHeader.Entsize", Field, 0, ""}, + {"SectionHeader.FileSize", Field, 6, ""}, + {"SectionHeader.Flags", Field, 0, ""}, + {"SectionHeader.Info", Field, 0, ""}, + {"SectionHeader.Link", Field, 0, ""}, + {"SectionHeader.Name", Field, 0, ""}, + {"SectionHeader.Offset", Field, 0, ""}, + {"SectionHeader.Size", Field, 0, ""}, + {"SectionHeader.Type", Field, 0, ""}, + {"SectionIndex", Type, 0, ""}, + {"SectionType", Type, 0, ""}, + {"Sym32", Type, 0, ""}, + {"Sym32.Info", Field, 0, ""}, + {"Sym32.Name", Field, 0, ""}, + {"Sym32.Other", Field, 0, ""}, + {"Sym32.Shndx", Field, 0, ""}, + {"Sym32.Size", Field, 0, ""}, + {"Sym32.Value", Field, 0, ""}, + {"Sym32Size", Const, 0, ""}, + {"Sym64", Type, 0, ""}, + {"Sym64.Info", Field, 0, ""}, + {"Sym64.Name", Field, 0, ""}, + {"Sym64.Other", Field, 0, ""}, + {"Sym64.Shndx", Field, 0, ""}, + {"Sym64.Size", Field, 0, ""}, + {"Sym64.Value", Field, 0, ""}, + {"Sym64Size", Const, 0, ""}, + {"SymBind", Type, 0, ""}, + {"SymType", Type, 0, ""}, + {"SymVis", Type, 0, ""}, + {"Symbol", Type, 0, ""}, + {"Symbol.HasVersion", Field, 24, ""}, + {"Symbol.Info", Field, 0, ""}, + {"Symbol.Library", Field, 13, ""}, + {"Symbol.Name", Field, 0, ""}, + {"Symbol.Other", Field, 0, ""}, + {"Symbol.Section", Field, 0, ""}, + {"Symbol.Size", Field, 0, ""}, + {"Symbol.Value", Field, 0, ""}, + {"Symbol.Version", Field, 13, ""}, + {"Symbol.VersionIndex", Field, 24, ""}, + {"Type", Type, 0, ""}, + {"VER_FLG_BASE", Const, 24, ""}, + {"VER_FLG_INFO", Const, 24, ""}, + {"VER_FLG_WEAK", Const, 24, ""}, + {"Version", Type, 0, ""}, + {"VersionIndex", Type, 24, ""}, + }, + "debug/gosym": { + {"(*DecodingError).Error", Method, 0, ""}, + {"(*LineTable).LineToPC", Method, 0, ""}, + {"(*LineTable).PCToLine", Method, 0, ""}, + {"(*Sym).BaseName", Method, 0, ""}, + {"(*Sym).PackageName", Method, 0, ""}, + {"(*Sym).ReceiverName", Method, 0, ""}, + {"(*Sym).Static", Method, 0, ""}, + {"(*Table).LineToPC", Method, 0, ""}, + {"(*Table).LookupFunc", Method, 0, ""}, + {"(*Table).LookupSym", Method, 0, ""}, + {"(*Table).PCToFunc", Method, 0, ""}, + {"(*Table).PCToLine", Method, 0, ""}, + {"(*Table).SymByAddr", Method, 0, ""}, + {"(*UnknownLineError).Error", Method, 0, ""}, + {"(Func).BaseName", Method, 0, ""}, + {"(Func).PackageName", Method, 0, ""}, + {"(Func).ReceiverName", Method, 0, ""}, + {"(Func).Static", Method, 0, ""}, + {"(UnknownFileError).Error", Method, 0, 
""}, + {"DecodingError", Type, 0, ""}, + {"Func", Type, 0, ""}, + {"Func.End", Field, 0, ""}, + {"Func.Entry", Field, 0, ""}, + {"Func.FrameSize", Field, 0, ""}, + {"Func.LineTable", Field, 0, ""}, + {"Func.Locals", Field, 0, ""}, + {"Func.Obj", Field, 0, ""}, + {"Func.Params", Field, 0, ""}, + {"Func.Sym", Field, 0, ""}, + {"LineTable", Type, 0, ""}, + {"LineTable.Data", Field, 0, ""}, + {"LineTable.Line", Field, 0, ""}, + {"LineTable.PC", Field, 0, ""}, + {"NewLineTable", Func, 0, "func(data []byte, text uint64) *LineTable"}, + {"NewTable", Func, 0, "func(symtab []byte, pcln *LineTable) (*Table, error)"}, + {"Obj", Type, 0, ""}, + {"Obj.Funcs", Field, 0, ""}, + {"Obj.Paths", Field, 0, ""}, + {"Sym", Type, 0, ""}, + {"Sym.Func", Field, 0, ""}, + {"Sym.GoType", Field, 0, ""}, + {"Sym.Name", Field, 0, ""}, + {"Sym.Type", Field, 0, ""}, + {"Sym.Value", Field, 0, ""}, + {"Table", Type, 0, ""}, + {"Table.Files", Field, 0, ""}, + {"Table.Funcs", Field, 0, ""}, + {"Table.Objs", Field, 0, ""}, + {"Table.Syms", Field, 0, ""}, + {"UnknownFileError", Type, 0, ""}, + {"UnknownLineError", Type, 0, ""}, + {"UnknownLineError.File", Field, 0, ""}, + {"UnknownLineError.Line", Field, 0, ""}, + }, + "debug/macho": { + {"(*FatFile).Close", Method, 3, ""}, + {"(*File).Close", Method, 0, ""}, + {"(*File).DWARF", Method, 0, ""}, + {"(*File).ImportedLibraries", Method, 0, ""}, + {"(*File).ImportedSymbols", Method, 0, ""}, + {"(*File).Section", Method, 0, ""}, + {"(*File).Segment", Method, 0, ""}, + {"(*FormatError).Error", Method, 0, ""}, + {"(*Section).Data", Method, 0, ""}, + {"(*Section).Open", Method, 0, ""}, + {"(*Segment).Data", Method, 0, ""}, + {"(*Segment).Open", Method, 0, ""}, + {"(Cpu).GoString", Method, 0, ""}, + {"(Cpu).String", Method, 0, ""}, + {"(Dylib).Raw", Method, 0, ""}, + {"(Dysymtab).Raw", Method, 0, ""}, + {"(FatArch).Close", Method, 3, ""}, + {"(FatArch).DWARF", Method, 3, ""}, + {"(FatArch).ImportedLibraries", Method, 3, ""}, + {"(FatArch).ImportedSymbols", Method, 3, ""}, + {"(FatArch).Section", Method, 3, ""}, + {"(FatArch).Segment", Method, 3, ""}, + {"(LoadBytes).Raw", Method, 0, ""}, + {"(LoadCmd).GoString", Method, 0, ""}, + {"(LoadCmd).String", Method, 0, ""}, + {"(RelocTypeARM).GoString", Method, 10, ""}, + {"(RelocTypeARM).String", Method, 10, ""}, + {"(RelocTypeARM64).GoString", Method, 10, ""}, + {"(RelocTypeARM64).String", Method, 10, ""}, + {"(RelocTypeGeneric).GoString", Method, 10, ""}, + {"(RelocTypeGeneric).String", Method, 10, ""}, + {"(RelocTypeX86_64).GoString", Method, 10, ""}, + {"(RelocTypeX86_64).String", Method, 10, ""}, + {"(Rpath).Raw", Method, 10, ""}, + {"(Section).ReadAt", Method, 0, ""}, + {"(Segment).Raw", Method, 0, ""}, + {"(Segment).ReadAt", Method, 0, ""}, + {"(Symtab).Raw", Method, 0, ""}, + {"(Type).GoString", Method, 10, ""}, + {"(Type).String", Method, 10, ""}, + {"ARM64_RELOC_ADDEND", Const, 10, ""}, + {"ARM64_RELOC_BRANCH26", Const, 10, ""}, + {"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10, ""}, + {"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10, ""}, + {"ARM64_RELOC_PAGE21", Const, 10, ""}, + {"ARM64_RELOC_PAGEOFF12", Const, 10, ""}, + {"ARM64_RELOC_POINTER_TO_GOT", Const, 10, ""}, + {"ARM64_RELOC_SUBTRACTOR", Const, 10, ""}, + {"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10, ""}, + {"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10, ""}, + {"ARM64_RELOC_UNSIGNED", Const, 10, ""}, + {"ARM_RELOC_BR24", Const, 10, ""}, + {"ARM_RELOC_HALF", Const, 10, ""}, + {"ARM_RELOC_HALF_SECTDIFF", Const, 10, ""}, + {"ARM_RELOC_LOCAL_SECTDIFF", Const, 10, ""}, + 
{"ARM_RELOC_PAIR", Const, 10, ""}, + {"ARM_RELOC_PB_LA_PTR", Const, 10, ""}, + {"ARM_RELOC_SECTDIFF", Const, 10, ""}, + {"ARM_RELOC_VANILLA", Const, 10, ""}, + {"ARM_THUMB_32BIT_BRANCH", Const, 10, ""}, + {"ARM_THUMB_RELOC_BR22", Const, 10, ""}, + {"Cpu", Type, 0, ""}, + {"Cpu386", Const, 0, ""}, + {"CpuAmd64", Const, 0, ""}, + {"CpuArm", Const, 3, ""}, + {"CpuArm64", Const, 11, ""}, + {"CpuPpc", Const, 3, ""}, + {"CpuPpc64", Const, 3, ""}, + {"Dylib", Type, 0, ""}, + {"Dylib.CompatVersion", Field, 0, ""}, + {"Dylib.CurrentVersion", Field, 0, ""}, + {"Dylib.LoadBytes", Field, 0, ""}, + {"Dylib.Name", Field, 0, ""}, + {"Dylib.Time", Field, 0, ""}, + {"DylibCmd", Type, 0, ""}, + {"DylibCmd.Cmd", Field, 0, ""}, + {"DylibCmd.CompatVersion", Field, 0, ""}, + {"DylibCmd.CurrentVersion", Field, 0, ""}, + {"DylibCmd.Len", Field, 0, ""}, + {"DylibCmd.Name", Field, 0, ""}, + {"DylibCmd.Time", Field, 0, ""}, + {"Dysymtab", Type, 0, ""}, + {"Dysymtab.DysymtabCmd", Field, 0, ""}, + {"Dysymtab.IndirectSyms", Field, 0, ""}, + {"Dysymtab.LoadBytes", Field, 0, ""}, + {"DysymtabCmd", Type, 0, ""}, + {"DysymtabCmd.Cmd", Field, 0, ""}, + {"DysymtabCmd.Extrefsymoff", Field, 0, ""}, + {"DysymtabCmd.Extreloff", Field, 0, ""}, + {"DysymtabCmd.Iextdefsym", Field, 0, ""}, + {"DysymtabCmd.Ilocalsym", Field, 0, ""}, + {"DysymtabCmd.Indirectsymoff", Field, 0, ""}, + {"DysymtabCmd.Iundefsym", Field, 0, ""}, + {"DysymtabCmd.Len", Field, 0, ""}, + {"DysymtabCmd.Locreloff", Field, 0, ""}, + {"DysymtabCmd.Modtaboff", Field, 0, ""}, + {"DysymtabCmd.Nextdefsym", Field, 0, ""}, + {"DysymtabCmd.Nextrefsyms", Field, 0, ""}, + {"DysymtabCmd.Nextrel", Field, 0, ""}, + {"DysymtabCmd.Nindirectsyms", Field, 0, ""}, + {"DysymtabCmd.Nlocalsym", Field, 0, ""}, + {"DysymtabCmd.Nlocrel", Field, 0, ""}, + {"DysymtabCmd.Nmodtab", Field, 0, ""}, + {"DysymtabCmd.Ntoc", Field, 0, ""}, + {"DysymtabCmd.Nundefsym", Field, 0, ""}, + {"DysymtabCmd.Tocoffset", Field, 0, ""}, + {"ErrNotFat", Var, 3, ""}, + {"FatArch", Type, 3, ""}, + {"FatArch.FatArchHeader", Field, 3, ""}, + {"FatArch.File", Field, 3, ""}, + {"FatArchHeader", Type, 3, ""}, + {"FatArchHeader.Align", Field, 3, ""}, + {"FatArchHeader.Cpu", Field, 3, ""}, + {"FatArchHeader.Offset", Field, 3, ""}, + {"FatArchHeader.Size", Field, 3, ""}, + {"FatArchHeader.SubCpu", Field, 3, ""}, + {"FatFile", Type, 3, ""}, + {"FatFile.Arches", Field, 3, ""}, + {"FatFile.Magic", Field, 3, ""}, + {"File", Type, 0, ""}, + {"File.ByteOrder", Field, 0, ""}, + {"File.Dysymtab", Field, 0, ""}, + {"File.FileHeader", Field, 0, ""}, + {"File.Loads", Field, 0, ""}, + {"File.Sections", Field, 0, ""}, + {"File.Symtab", Field, 0, ""}, + {"FileHeader", Type, 0, ""}, + {"FileHeader.Cmdsz", Field, 0, ""}, + {"FileHeader.Cpu", Field, 0, ""}, + {"FileHeader.Flags", Field, 0, ""}, + {"FileHeader.Magic", Field, 0, ""}, + {"FileHeader.Ncmd", Field, 0, ""}, + {"FileHeader.SubCpu", Field, 0, ""}, + {"FileHeader.Type", Field, 0, ""}, + {"FlagAllModsBound", Const, 10, ""}, + {"FlagAllowStackExecution", Const, 10, ""}, + {"FlagAppExtensionSafe", Const, 10, ""}, + {"FlagBindAtLoad", Const, 10, ""}, + {"FlagBindsToWeak", Const, 10, ""}, + {"FlagCanonical", Const, 10, ""}, + {"FlagDeadStrippableDylib", Const, 10, ""}, + {"FlagDyldLink", Const, 10, ""}, + {"FlagForceFlat", Const, 10, ""}, + {"FlagHasTLVDescriptors", Const, 10, ""}, + {"FlagIncrLink", Const, 10, ""}, + {"FlagLazyInit", Const, 10, ""}, + {"FlagNoFixPrebinding", Const, 10, ""}, + {"FlagNoHeapExecution", Const, 10, ""}, + {"FlagNoMultiDefs", Const, 10, ""}, + 
{"FlagNoReexportedDylibs", Const, 10, ""}, + {"FlagNoUndefs", Const, 10, ""}, + {"FlagPIE", Const, 10, ""}, + {"FlagPrebindable", Const, 10, ""}, + {"FlagPrebound", Const, 10, ""}, + {"FlagRootSafe", Const, 10, ""}, + {"FlagSetuidSafe", Const, 10, ""}, + {"FlagSplitSegs", Const, 10, ""}, + {"FlagSubsectionsViaSymbols", Const, 10, ""}, + {"FlagTwoLevel", Const, 10, ""}, + {"FlagWeakDefines", Const, 10, ""}, + {"FormatError", Type, 0, ""}, + {"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10, ""}, + {"GENERIC_RELOC_PAIR", Const, 10, ""}, + {"GENERIC_RELOC_PB_LA_PTR", Const, 10, ""}, + {"GENERIC_RELOC_SECTDIFF", Const, 10, ""}, + {"GENERIC_RELOC_TLV", Const, 10, ""}, + {"GENERIC_RELOC_VANILLA", Const, 10, ""}, + {"Load", Type, 0, ""}, + {"LoadBytes", Type, 0, ""}, + {"LoadCmd", Type, 0, ""}, + {"LoadCmdDylib", Const, 0, ""}, + {"LoadCmdDylinker", Const, 0, ""}, + {"LoadCmdDysymtab", Const, 0, ""}, + {"LoadCmdRpath", Const, 10, ""}, + {"LoadCmdSegment", Const, 0, ""}, + {"LoadCmdSegment64", Const, 0, ""}, + {"LoadCmdSymtab", Const, 0, ""}, + {"LoadCmdThread", Const, 0, ""}, + {"LoadCmdUnixThread", Const, 0, ""}, + {"Magic32", Const, 0, ""}, + {"Magic64", Const, 0, ""}, + {"MagicFat", Const, 3, ""}, + {"NewFatFile", Func, 3, "func(r io.ReaderAt) (*FatFile, error)"}, + {"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"}, + {"Nlist32", Type, 0, ""}, + {"Nlist32.Desc", Field, 0, ""}, + {"Nlist32.Name", Field, 0, ""}, + {"Nlist32.Sect", Field, 0, ""}, + {"Nlist32.Type", Field, 0, ""}, + {"Nlist32.Value", Field, 0, ""}, + {"Nlist64", Type, 0, ""}, + {"Nlist64.Desc", Field, 0, ""}, + {"Nlist64.Name", Field, 0, ""}, + {"Nlist64.Sect", Field, 0, ""}, + {"Nlist64.Type", Field, 0, ""}, + {"Nlist64.Value", Field, 0, ""}, + {"Open", Func, 0, "func(name string) (*File, error)"}, + {"OpenFat", Func, 3, "func(name string) (*FatFile, error)"}, + {"Regs386", Type, 0, ""}, + {"Regs386.AX", Field, 0, ""}, + {"Regs386.BP", Field, 0, ""}, + {"Regs386.BX", Field, 0, ""}, + {"Regs386.CS", Field, 0, ""}, + {"Regs386.CX", Field, 0, ""}, + {"Regs386.DI", Field, 0, ""}, + {"Regs386.DS", Field, 0, ""}, + {"Regs386.DX", Field, 0, ""}, + {"Regs386.ES", Field, 0, ""}, + {"Regs386.FLAGS", Field, 0, ""}, + {"Regs386.FS", Field, 0, ""}, + {"Regs386.GS", Field, 0, ""}, + {"Regs386.IP", Field, 0, ""}, + {"Regs386.SI", Field, 0, ""}, + {"Regs386.SP", Field, 0, ""}, + {"Regs386.SS", Field, 0, ""}, + {"RegsAMD64", Type, 0, ""}, + {"RegsAMD64.AX", Field, 0, ""}, + {"RegsAMD64.BP", Field, 0, ""}, + {"RegsAMD64.BX", Field, 0, ""}, + {"RegsAMD64.CS", Field, 0, ""}, + {"RegsAMD64.CX", Field, 0, ""}, + {"RegsAMD64.DI", Field, 0, ""}, + {"RegsAMD64.DX", Field, 0, ""}, + {"RegsAMD64.FLAGS", Field, 0, ""}, + {"RegsAMD64.FS", Field, 0, ""}, + {"RegsAMD64.GS", Field, 0, ""}, + {"RegsAMD64.IP", Field, 0, ""}, + {"RegsAMD64.R10", Field, 0, ""}, + {"RegsAMD64.R11", Field, 0, ""}, + {"RegsAMD64.R12", Field, 0, ""}, + {"RegsAMD64.R13", Field, 0, ""}, + {"RegsAMD64.R14", Field, 0, ""}, + {"RegsAMD64.R15", Field, 0, ""}, + {"RegsAMD64.R8", Field, 0, ""}, + {"RegsAMD64.R9", Field, 0, ""}, + {"RegsAMD64.SI", Field, 0, ""}, + {"RegsAMD64.SP", Field, 0, ""}, + {"Reloc", Type, 10, ""}, + {"Reloc.Addr", Field, 10, ""}, + {"Reloc.Extern", Field, 10, ""}, + {"Reloc.Len", Field, 10, ""}, + {"Reloc.Pcrel", Field, 10, ""}, + {"Reloc.Scattered", Field, 10, ""}, + {"Reloc.Type", Field, 10, ""}, + {"Reloc.Value", Field, 10, ""}, + {"RelocTypeARM", Type, 10, ""}, + {"RelocTypeARM64", Type, 10, ""}, + {"RelocTypeGeneric", Type, 10, ""}, + {"RelocTypeX86_64", Type, 
10, ""}, + {"Rpath", Type, 10, ""}, + {"Rpath.LoadBytes", Field, 10, ""}, + {"Rpath.Path", Field, 10, ""}, + {"RpathCmd", Type, 10, ""}, + {"RpathCmd.Cmd", Field, 10, ""}, + {"RpathCmd.Len", Field, 10, ""}, + {"RpathCmd.Path", Field, 10, ""}, + {"Section", Type, 0, ""}, + {"Section.ReaderAt", Field, 0, ""}, + {"Section.Relocs", Field, 10, ""}, + {"Section.SectionHeader", Field, 0, ""}, + {"Section32", Type, 0, ""}, + {"Section32.Addr", Field, 0, ""}, + {"Section32.Align", Field, 0, ""}, + {"Section32.Flags", Field, 0, ""}, + {"Section32.Name", Field, 0, ""}, + {"Section32.Nreloc", Field, 0, ""}, + {"Section32.Offset", Field, 0, ""}, + {"Section32.Reloff", Field, 0, ""}, + {"Section32.Reserve1", Field, 0, ""}, + {"Section32.Reserve2", Field, 0, ""}, + {"Section32.Seg", Field, 0, ""}, + {"Section32.Size", Field, 0, ""}, + {"Section64", Type, 0, ""}, + {"Section64.Addr", Field, 0, ""}, + {"Section64.Align", Field, 0, ""}, + {"Section64.Flags", Field, 0, ""}, + {"Section64.Name", Field, 0, ""}, + {"Section64.Nreloc", Field, 0, ""}, + {"Section64.Offset", Field, 0, ""}, + {"Section64.Reloff", Field, 0, ""}, + {"Section64.Reserve1", Field, 0, ""}, + {"Section64.Reserve2", Field, 0, ""}, + {"Section64.Reserve3", Field, 0, ""}, + {"Section64.Seg", Field, 0, ""}, + {"Section64.Size", Field, 0, ""}, + {"SectionHeader", Type, 0, ""}, + {"SectionHeader.Addr", Field, 0, ""}, + {"SectionHeader.Align", Field, 0, ""}, + {"SectionHeader.Flags", Field, 0, ""}, + {"SectionHeader.Name", Field, 0, ""}, + {"SectionHeader.Nreloc", Field, 0, ""}, + {"SectionHeader.Offset", Field, 0, ""}, + {"SectionHeader.Reloff", Field, 0, ""}, + {"SectionHeader.Seg", Field, 0, ""}, + {"SectionHeader.Size", Field, 0, ""}, + {"Segment", Type, 0, ""}, + {"Segment.LoadBytes", Field, 0, ""}, + {"Segment.ReaderAt", Field, 0, ""}, + {"Segment.SegmentHeader", Field, 0, ""}, + {"Segment32", Type, 0, ""}, + {"Segment32.Addr", Field, 0, ""}, + {"Segment32.Cmd", Field, 0, ""}, + {"Segment32.Filesz", Field, 0, ""}, + {"Segment32.Flag", Field, 0, ""}, + {"Segment32.Len", Field, 0, ""}, + {"Segment32.Maxprot", Field, 0, ""}, + {"Segment32.Memsz", Field, 0, ""}, + {"Segment32.Name", Field, 0, ""}, + {"Segment32.Nsect", Field, 0, ""}, + {"Segment32.Offset", Field, 0, ""}, + {"Segment32.Prot", Field, 0, ""}, + {"Segment64", Type, 0, ""}, + {"Segment64.Addr", Field, 0, ""}, + {"Segment64.Cmd", Field, 0, ""}, + {"Segment64.Filesz", Field, 0, ""}, + {"Segment64.Flag", Field, 0, ""}, + {"Segment64.Len", Field, 0, ""}, + {"Segment64.Maxprot", Field, 0, ""}, + {"Segment64.Memsz", Field, 0, ""}, + {"Segment64.Name", Field, 0, ""}, + {"Segment64.Nsect", Field, 0, ""}, + {"Segment64.Offset", Field, 0, ""}, + {"Segment64.Prot", Field, 0, ""}, + {"SegmentHeader", Type, 0, ""}, + {"SegmentHeader.Addr", Field, 0, ""}, + {"SegmentHeader.Cmd", Field, 0, ""}, + {"SegmentHeader.Filesz", Field, 0, ""}, + {"SegmentHeader.Flag", Field, 0, ""}, + {"SegmentHeader.Len", Field, 0, ""}, + {"SegmentHeader.Maxprot", Field, 0, ""}, + {"SegmentHeader.Memsz", Field, 0, ""}, + {"SegmentHeader.Name", Field, 0, ""}, + {"SegmentHeader.Nsect", Field, 0, ""}, + {"SegmentHeader.Offset", Field, 0, ""}, + {"SegmentHeader.Prot", Field, 0, ""}, + {"Symbol", Type, 0, ""}, + {"Symbol.Desc", Field, 0, ""}, + {"Symbol.Name", Field, 0, ""}, + {"Symbol.Sect", Field, 0, ""}, + {"Symbol.Type", Field, 0, ""}, + {"Symbol.Value", Field, 0, ""}, + {"Symtab", Type, 0, ""}, + {"Symtab.LoadBytes", Field, 0, ""}, + {"Symtab.Syms", Field, 0, ""}, + {"Symtab.SymtabCmd", Field, 0, ""}, + {"SymtabCmd", 
Type, 0, ""}, + {"SymtabCmd.Cmd", Field, 0, ""}, + {"SymtabCmd.Len", Field, 0, ""}, + {"SymtabCmd.Nsyms", Field, 0, ""}, + {"SymtabCmd.Stroff", Field, 0, ""}, + {"SymtabCmd.Strsize", Field, 0, ""}, + {"SymtabCmd.Symoff", Field, 0, ""}, + {"Thread", Type, 0, ""}, + {"Thread.Cmd", Field, 0, ""}, + {"Thread.Data", Field, 0, ""}, + {"Thread.Len", Field, 0, ""}, + {"Thread.Type", Field, 0, ""}, + {"Type", Type, 0, ""}, + {"TypeBundle", Const, 3, ""}, + {"TypeDylib", Const, 3, ""}, + {"TypeExec", Const, 0, ""}, + {"TypeObj", Const, 0, ""}, + {"X86_64_RELOC_BRANCH", Const, 10, ""}, + {"X86_64_RELOC_GOT", Const, 10, ""}, + {"X86_64_RELOC_GOT_LOAD", Const, 10, ""}, + {"X86_64_RELOC_SIGNED", Const, 10, ""}, + {"X86_64_RELOC_SIGNED_1", Const, 10, ""}, + {"X86_64_RELOC_SIGNED_2", Const, 10, ""}, + {"X86_64_RELOC_SIGNED_4", Const, 10, ""}, + {"X86_64_RELOC_SUBTRACTOR", Const, 10, ""}, + {"X86_64_RELOC_TLV", Const, 10, ""}, + {"X86_64_RELOC_UNSIGNED", Const, 10, ""}, + }, + "debug/pe": { + {"(*COFFSymbol).FullName", Method, 8, ""}, + {"(*File).COFFSymbolReadSectionDefAux", Method, 19, ""}, + {"(*File).Close", Method, 0, ""}, + {"(*File).DWARF", Method, 0, ""}, + {"(*File).ImportedLibraries", Method, 0, ""}, + {"(*File).ImportedSymbols", Method, 0, ""}, + {"(*File).Section", Method, 0, ""}, + {"(*FormatError).Error", Method, 0, ""}, + {"(*Section).Data", Method, 0, ""}, + {"(*Section).Open", Method, 0, ""}, + {"(Section).ReadAt", Method, 0, ""}, + {"(StringTable).String", Method, 8, ""}, + {"COFFSymbol", Type, 1, ""}, + {"COFFSymbol.Name", Field, 1, ""}, + {"COFFSymbol.NumberOfAuxSymbols", Field, 1, ""}, + {"COFFSymbol.SectionNumber", Field, 1, ""}, + {"COFFSymbol.StorageClass", Field, 1, ""}, + {"COFFSymbol.Type", Field, 1, ""}, + {"COFFSymbol.Value", Field, 1, ""}, + {"COFFSymbolAuxFormat5", Type, 19, ""}, + {"COFFSymbolAuxFormat5.Checksum", Field, 19, ""}, + {"COFFSymbolAuxFormat5.NumLineNumbers", Field, 19, ""}, + {"COFFSymbolAuxFormat5.NumRelocs", Field, 19, ""}, + {"COFFSymbolAuxFormat5.SecNum", Field, 19, ""}, + {"COFFSymbolAuxFormat5.Selection", Field, 19, ""}, + {"COFFSymbolAuxFormat5.Size", Field, 19, ""}, + {"COFFSymbolSize", Const, 1, ""}, + {"DataDirectory", Type, 3, ""}, + {"DataDirectory.Size", Field, 3, ""}, + {"DataDirectory.VirtualAddress", Field, 3, ""}, + {"File", Type, 0, ""}, + {"File.COFFSymbols", Field, 8, ""}, + {"File.FileHeader", Field, 0, ""}, + {"File.OptionalHeader", Field, 3, ""}, + {"File.Sections", Field, 0, ""}, + {"File.StringTable", Field, 8, ""}, + {"File.Symbols", Field, 1, ""}, + {"FileHeader", Type, 0, ""}, + {"FileHeader.Characteristics", Field, 0, ""}, + {"FileHeader.Machine", Field, 0, ""}, + {"FileHeader.NumberOfSections", Field, 0, ""}, + {"FileHeader.NumberOfSymbols", Field, 0, ""}, + {"FileHeader.PointerToSymbolTable", Field, 0, ""}, + {"FileHeader.SizeOfOptionalHeader", Field, 0, ""}, + {"FileHeader.TimeDateStamp", Field, 0, ""}, + {"FormatError", Type, 0, ""}, + {"IMAGE_COMDAT_SELECT_ANY", Const, 19, ""}, + {"IMAGE_COMDAT_SELECT_ASSOCIATIVE", Const, 19, ""}, + {"IMAGE_COMDAT_SELECT_EXACT_MATCH", Const, 19, ""}, + {"IMAGE_COMDAT_SELECT_LARGEST", Const, 19, ""}, + {"IMAGE_COMDAT_SELECT_NODUPLICATES", Const, 19, ""}, + {"IMAGE_COMDAT_SELECT_SAME_SIZE", Const, 19, ""}, + {"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_BASERELOC", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_DEBUG", Const, 11, ""}, + 
{"IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_EXCEPTION", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_EXPORT", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_GLOBALPTR", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_IAT", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_IMPORT", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_RESOURCE", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_SECURITY", Const, 11, ""}, + {"IMAGE_DIRECTORY_ENTRY_TLS", Const, 11, ""}, + {"IMAGE_DLLCHARACTERISTICS_APPCONTAINER", Const, 15, ""}, + {"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", Const, 15, ""}, + {"IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", Const, 15, ""}, + {"IMAGE_DLLCHARACTERISTICS_GUARD_CF", Const, 15, ""}, + {"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", Const, 15, ""}, + {"IMAGE_DLLCHARACTERISTICS_NO_BIND", Const, 15, ""}, + {"IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", Const, 15, ""}, + {"IMAGE_DLLCHARACTERISTICS_NO_SEH", Const, 15, ""}, + {"IMAGE_DLLCHARACTERISTICS_NX_COMPAT", Const, 15, ""}, + {"IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", Const, 15, ""}, + {"IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", Const, 15, ""}, + {"IMAGE_FILE_32BIT_MACHINE", Const, 15, ""}, + {"IMAGE_FILE_AGGRESIVE_WS_TRIM", Const, 15, ""}, + {"IMAGE_FILE_BYTES_REVERSED_HI", Const, 15, ""}, + {"IMAGE_FILE_BYTES_REVERSED_LO", Const, 15, ""}, + {"IMAGE_FILE_DEBUG_STRIPPED", Const, 15, ""}, + {"IMAGE_FILE_DLL", Const, 15, ""}, + {"IMAGE_FILE_EXECUTABLE_IMAGE", Const, 15, ""}, + {"IMAGE_FILE_LARGE_ADDRESS_AWARE", Const, 15, ""}, + {"IMAGE_FILE_LINE_NUMS_STRIPPED", Const, 15, ""}, + {"IMAGE_FILE_LOCAL_SYMS_STRIPPED", Const, 15, ""}, + {"IMAGE_FILE_MACHINE_AM33", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_AMD64", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_ARM", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_ARM64", Const, 11, ""}, + {"IMAGE_FILE_MACHINE_ARMNT", Const, 12, ""}, + {"IMAGE_FILE_MACHINE_EBC", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_I386", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_IA64", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_LOONGARCH32", Const, 19, ""}, + {"IMAGE_FILE_MACHINE_LOONGARCH64", Const, 19, ""}, + {"IMAGE_FILE_MACHINE_M32R", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_MIPS16", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_MIPSFPU", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_MIPSFPU16", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_POWERPC", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_POWERPCFP", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_R4000", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_RISCV128", Const, 20, ""}, + {"IMAGE_FILE_MACHINE_RISCV32", Const, 20, ""}, + {"IMAGE_FILE_MACHINE_RISCV64", Const, 20, ""}, + {"IMAGE_FILE_MACHINE_SH3", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_SH3DSP", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_SH4", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_SH5", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_THUMB", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_UNKNOWN", Const, 0, ""}, + {"IMAGE_FILE_MACHINE_WCEMIPSV2", Const, 0, ""}, + {"IMAGE_FILE_NET_RUN_FROM_SWAP", Const, 15, ""}, + {"IMAGE_FILE_RELOCS_STRIPPED", Const, 15, ""}, + {"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", Const, 15, ""}, + {"IMAGE_FILE_SYSTEM", Const, 15, ""}, + {"IMAGE_FILE_UP_SYSTEM_ONLY", Const, 15, ""}, + {"IMAGE_SCN_CNT_CODE", Const, 19, ""}, + {"IMAGE_SCN_CNT_INITIALIZED_DATA", Const, 19, ""}, + {"IMAGE_SCN_CNT_UNINITIALIZED_DATA", Const, 19, ""}, + {"IMAGE_SCN_LNK_COMDAT", Const, 19, ""}, + {"IMAGE_SCN_MEM_DISCARDABLE", Const, 19, ""}, + {"IMAGE_SCN_MEM_EXECUTE", Const, 19, ""}, + {"IMAGE_SCN_MEM_READ", Const, 19, ""}, + {"IMAGE_SCN_MEM_WRITE", Const, 19, 
""}, + {"IMAGE_SUBSYSTEM_EFI_APPLICATION", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_EFI_ROM", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_NATIVE", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_NATIVE_WINDOWS", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_OS2_CUI", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_POSIX_CUI", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_UNKNOWN", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_WINDOWS_CUI", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_WINDOWS_GUI", Const, 15, ""}, + {"IMAGE_SUBSYSTEM_XBOX", Const, 15, ""}, + {"ImportDirectory", Type, 0, ""}, + {"ImportDirectory.FirstThunk", Field, 0, ""}, + {"ImportDirectory.ForwarderChain", Field, 0, ""}, + {"ImportDirectory.Name", Field, 0, ""}, + {"ImportDirectory.OriginalFirstThunk", Field, 0, ""}, + {"ImportDirectory.TimeDateStamp", Field, 0, ""}, + {"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"}, + {"Open", Func, 0, "func(name string) (*File, error)"}, + {"OptionalHeader32", Type, 3, ""}, + {"OptionalHeader32.AddressOfEntryPoint", Field, 3, ""}, + {"OptionalHeader32.BaseOfCode", Field, 3, ""}, + {"OptionalHeader32.BaseOfData", Field, 3, ""}, + {"OptionalHeader32.CheckSum", Field, 3, ""}, + {"OptionalHeader32.DataDirectory", Field, 3, ""}, + {"OptionalHeader32.DllCharacteristics", Field, 3, ""}, + {"OptionalHeader32.FileAlignment", Field, 3, ""}, + {"OptionalHeader32.ImageBase", Field, 3, ""}, + {"OptionalHeader32.LoaderFlags", Field, 3, ""}, + {"OptionalHeader32.Magic", Field, 3, ""}, + {"OptionalHeader32.MajorImageVersion", Field, 3, ""}, + {"OptionalHeader32.MajorLinkerVersion", Field, 3, ""}, + {"OptionalHeader32.MajorOperatingSystemVersion", Field, 3, ""}, + {"OptionalHeader32.MajorSubsystemVersion", Field, 3, ""}, + {"OptionalHeader32.MinorImageVersion", Field, 3, ""}, + {"OptionalHeader32.MinorLinkerVersion", Field, 3, ""}, + {"OptionalHeader32.MinorOperatingSystemVersion", Field, 3, ""}, + {"OptionalHeader32.MinorSubsystemVersion", Field, 3, ""}, + {"OptionalHeader32.NumberOfRvaAndSizes", Field, 3, ""}, + {"OptionalHeader32.SectionAlignment", Field, 3, ""}, + {"OptionalHeader32.SizeOfCode", Field, 3, ""}, + {"OptionalHeader32.SizeOfHeaders", Field, 3, ""}, + {"OptionalHeader32.SizeOfHeapCommit", Field, 3, ""}, + {"OptionalHeader32.SizeOfHeapReserve", Field, 3, ""}, + {"OptionalHeader32.SizeOfImage", Field, 3, ""}, + {"OptionalHeader32.SizeOfInitializedData", Field, 3, ""}, + {"OptionalHeader32.SizeOfStackCommit", Field, 3, ""}, + {"OptionalHeader32.SizeOfStackReserve", Field, 3, ""}, + {"OptionalHeader32.SizeOfUninitializedData", Field, 3, ""}, + {"OptionalHeader32.Subsystem", Field, 3, ""}, + {"OptionalHeader32.Win32VersionValue", Field, 3, ""}, + {"OptionalHeader64", Type, 3, ""}, + {"OptionalHeader64.AddressOfEntryPoint", Field, 3, ""}, + {"OptionalHeader64.BaseOfCode", Field, 3, ""}, + {"OptionalHeader64.CheckSum", Field, 3, ""}, + {"OptionalHeader64.DataDirectory", Field, 3, ""}, + {"OptionalHeader64.DllCharacteristics", Field, 3, ""}, + {"OptionalHeader64.FileAlignment", Field, 3, ""}, + {"OptionalHeader64.ImageBase", Field, 3, ""}, + {"OptionalHeader64.LoaderFlags", Field, 3, ""}, + {"OptionalHeader64.Magic", Field, 3, ""}, + {"OptionalHeader64.MajorImageVersion", Field, 3, ""}, + {"OptionalHeader64.MajorLinkerVersion", Field, 3, ""}, + {"OptionalHeader64.MajorOperatingSystemVersion", Field, 3, ""}, + 
{"OptionalHeader64.MajorSubsystemVersion", Field, 3, ""}, + {"OptionalHeader64.MinorImageVersion", Field, 3, ""}, + {"OptionalHeader64.MinorLinkerVersion", Field, 3, ""}, + {"OptionalHeader64.MinorOperatingSystemVersion", Field, 3, ""}, + {"OptionalHeader64.MinorSubsystemVersion", Field, 3, ""}, + {"OptionalHeader64.NumberOfRvaAndSizes", Field, 3, ""}, + {"OptionalHeader64.SectionAlignment", Field, 3, ""}, + {"OptionalHeader64.SizeOfCode", Field, 3, ""}, + {"OptionalHeader64.SizeOfHeaders", Field, 3, ""}, + {"OptionalHeader64.SizeOfHeapCommit", Field, 3, ""}, + {"OptionalHeader64.SizeOfHeapReserve", Field, 3, ""}, + {"OptionalHeader64.SizeOfImage", Field, 3, ""}, + {"OptionalHeader64.SizeOfInitializedData", Field, 3, ""}, + {"OptionalHeader64.SizeOfStackCommit", Field, 3, ""}, + {"OptionalHeader64.SizeOfStackReserve", Field, 3, ""}, + {"OptionalHeader64.SizeOfUninitializedData", Field, 3, ""}, + {"OptionalHeader64.Subsystem", Field, 3, ""}, + {"OptionalHeader64.Win32VersionValue", Field, 3, ""}, + {"Reloc", Type, 8, ""}, + {"Reloc.SymbolTableIndex", Field, 8, ""}, + {"Reloc.Type", Field, 8, ""}, + {"Reloc.VirtualAddress", Field, 8, ""}, + {"Section", Type, 0, ""}, + {"Section.ReaderAt", Field, 0, ""}, + {"Section.Relocs", Field, 8, ""}, + {"Section.SectionHeader", Field, 0, ""}, + {"SectionHeader", Type, 0, ""}, + {"SectionHeader.Characteristics", Field, 0, ""}, + {"SectionHeader.Name", Field, 0, ""}, + {"SectionHeader.NumberOfLineNumbers", Field, 0, ""}, + {"SectionHeader.NumberOfRelocations", Field, 0, ""}, + {"SectionHeader.Offset", Field, 0, ""}, + {"SectionHeader.PointerToLineNumbers", Field, 0, ""}, + {"SectionHeader.PointerToRelocations", Field, 0, ""}, + {"SectionHeader.Size", Field, 0, ""}, + {"SectionHeader.VirtualAddress", Field, 0, ""}, + {"SectionHeader.VirtualSize", Field, 0, ""}, + {"SectionHeader32", Type, 0, ""}, + {"SectionHeader32.Characteristics", Field, 0, ""}, + {"SectionHeader32.Name", Field, 0, ""}, + {"SectionHeader32.NumberOfLineNumbers", Field, 0, ""}, + {"SectionHeader32.NumberOfRelocations", Field, 0, ""}, + {"SectionHeader32.PointerToLineNumbers", Field, 0, ""}, + {"SectionHeader32.PointerToRawData", Field, 0, ""}, + {"SectionHeader32.PointerToRelocations", Field, 0, ""}, + {"SectionHeader32.SizeOfRawData", Field, 0, ""}, + {"SectionHeader32.VirtualAddress", Field, 0, ""}, + {"SectionHeader32.VirtualSize", Field, 0, ""}, + {"StringTable", Type, 8, ""}, + {"Symbol", Type, 1, ""}, + {"Symbol.Name", Field, 1, ""}, + {"Symbol.SectionNumber", Field, 1, ""}, + {"Symbol.StorageClass", Field, 1, ""}, + {"Symbol.Type", Field, 1, ""}, + {"Symbol.Value", Field, 1, ""}, + }, + "debug/plan9obj": { + {"(*File).Close", Method, 3, ""}, + {"(*File).Section", Method, 3, ""}, + {"(*File).Symbols", Method, 3, ""}, + {"(*Section).Data", Method, 3, ""}, + {"(*Section).Open", Method, 3, ""}, + {"(Section).ReadAt", Method, 3, ""}, + {"ErrNoSymbols", Var, 18, ""}, + {"File", Type, 3, ""}, + {"File.FileHeader", Field, 3, ""}, + {"File.Sections", Field, 3, ""}, + {"FileHeader", Type, 3, ""}, + {"FileHeader.Bss", Field, 3, ""}, + {"FileHeader.Entry", Field, 3, ""}, + {"FileHeader.HdrSize", Field, 4, ""}, + {"FileHeader.LoadAddress", Field, 4, ""}, + {"FileHeader.Magic", Field, 3, ""}, + {"FileHeader.PtrSize", Field, 3, ""}, + {"Magic386", Const, 3, ""}, + {"Magic64", Const, 3, ""}, + {"MagicAMD64", Const, 3, ""}, + {"MagicARM", Const, 3, ""}, + {"NewFile", Func, 3, "func(r io.ReaderAt) (*File, error)"}, + {"Open", Func, 3, "func(name string) (*File, error)"}, + {"Section", Type, 3, ""}, 
+ {"Section.ReaderAt", Field, 3, ""}, + {"Section.SectionHeader", Field, 3, ""}, + {"SectionHeader", Type, 3, ""}, + {"SectionHeader.Name", Field, 3, ""}, + {"SectionHeader.Offset", Field, 3, ""}, + {"SectionHeader.Size", Field, 3, ""}, + {"Sym", Type, 3, ""}, + {"Sym.Name", Field, 3, ""}, + {"Sym.Type", Field, 3, ""}, + {"Sym.Value", Field, 3, ""}, + }, + "embed": { + {"(FS).Open", Method, 16, ""}, + {"(FS).ReadDir", Method, 16, ""}, + {"(FS).ReadFile", Method, 16, ""}, + {"FS", Type, 16, ""}, + }, + "encoding": { + {"BinaryAppender", Type, 24, ""}, + {"BinaryMarshaler", Type, 2, ""}, + {"BinaryUnmarshaler", Type, 2, ""}, + {"TextAppender", Type, 24, ""}, + {"TextMarshaler", Type, 2, ""}, + {"TextUnmarshaler", Type, 2, ""}, + }, + "encoding/ascii85": { + {"(CorruptInputError).Error", Method, 0, ""}, + {"CorruptInputError", Type, 0, ""}, + {"Decode", Func, 0, "func(dst []byte, src []byte, flush bool) (ndst int, nsrc int, err error)"}, + {"Encode", Func, 0, "func(dst []byte, src []byte) int"}, + {"MaxEncodedLen", Func, 0, "func(n int) int"}, + {"NewDecoder", Func, 0, "func(r io.Reader) io.Reader"}, + {"NewEncoder", Func, 0, "func(w io.Writer) io.WriteCloser"}, + }, + "encoding/asn1": { + {"(BitString).At", Method, 0, ""}, + {"(BitString).RightAlign", Method, 0, ""}, + {"(ObjectIdentifier).Equal", Method, 0, ""}, + {"(ObjectIdentifier).String", Method, 3, ""}, + {"(StructuralError).Error", Method, 0, ""}, + {"(SyntaxError).Error", Method, 0, ""}, + {"BitString", Type, 0, ""}, + {"BitString.BitLength", Field, 0, ""}, + {"BitString.Bytes", Field, 0, ""}, + {"ClassApplication", Const, 6, ""}, + {"ClassContextSpecific", Const, 6, ""}, + {"ClassPrivate", Const, 6, ""}, + {"ClassUniversal", Const, 6, ""}, + {"Enumerated", Type, 0, ""}, + {"Flag", Type, 0, ""}, + {"Marshal", Func, 0, "func(val any) ([]byte, error)"}, + {"MarshalWithParams", Func, 10, "func(val any, params string) ([]byte, error)"}, + {"NullBytes", Var, 9, ""}, + {"NullRawValue", Var, 9, ""}, + {"ObjectIdentifier", Type, 0, ""}, + {"RawContent", Type, 0, ""}, + {"RawValue", Type, 0, ""}, + {"RawValue.Bytes", Field, 0, ""}, + {"RawValue.Class", Field, 0, ""}, + {"RawValue.FullBytes", Field, 0, ""}, + {"RawValue.IsCompound", Field, 0, ""}, + {"RawValue.Tag", Field, 0, ""}, + {"StructuralError", Type, 0, ""}, + {"StructuralError.Msg", Field, 0, ""}, + {"SyntaxError", Type, 0, ""}, + {"SyntaxError.Msg", Field, 0, ""}, + {"TagBMPString", Const, 14, ""}, + {"TagBitString", Const, 6, ""}, + {"TagBoolean", Const, 6, ""}, + {"TagEnum", Const, 6, ""}, + {"TagGeneralString", Const, 6, ""}, + {"TagGeneralizedTime", Const, 6, ""}, + {"TagIA5String", Const, 6, ""}, + {"TagInteger", Const, 6, ""}, + {"TagNull", Const, 9, ""}, + {"TagNumericString", Const, 10, ""}, + {"TagOID", Const, 6, ""}, + {"TagOctetString", Const, 6, ""}, + {"TagPrintableString", Const, 6, ""}, + {"TagSequence", Const, 6, ""}, + {"TagSet", Const, 6, ""}, + {"TagT61String", Const, 6, ""}, + {"TagUTCTime", Const, 6, ""}, + {"TagUTF8String", Const, 6, ""}, + {"Unmarshal", Func, 0, "func(b []byte, val any) (rest []byte, err error)"}, + {"UnmarshalWithParams", Func, 0, "func(b []byte, val any, params string) (rest []byte, err error)"}, + }, + "encoding/base32": { + {"(*Encoding).AppendDecode", Method, 22, ""}, + {"(*Encoding).AppendEncode", Method, 22, ""}, + {"(*Encoding).Decode", Method, 0, ""}, + {"(*Encoding).DecodeString", Method, 0, ""}, + {"(*Encoding).DecodedLen", Method, 0, ""}, + {"(*Encoding).Encode", Method, 0, ""}, + {"(*Encoding).EncodeToString", Method, 0, ""}, + 
{"(*Encoding).EncodedLen", Method, 0, ""}, + {"(CorruptInputError).Error", Method, 0, ""}, + {"(Encoding).WithPadding", Method, 9, ""}, + {"CorruptInputError", Type, 0, ""}, + {"Encoding", Type, 0, ""}, + {"HexEncoding", Var, 0, ""}, + {"NewDecoder", Func, 0, "func(enc *Encoding, r io.Reader) io.Reader"}, + {"NewEncoder", Func, 0, "func(enc *Encoding, w io.Writer) io.WriteCloser"}, + {"NewEncoding", Func, 0, "func(encoder string) *Encoding"}, + {"NoPadding", Const, 9, ""}, + {"StdEncoding", Var, 0, ""}, + {"StdPadding", Const, 9, ""}, + }, + "encoding/base64": { + {"(*Encoding).AppendDecode", Method, 22, ""}, + {"(*Encoding).AppendEncode", Method, 22, ""}, + {"(*Encoding).Decode", Method, 0, ""}, + {"(*Encoding).DecodeString", Method, 0, ""}, + {"(*Encoding).DecodedLen", Method, 0, ""}, + {"(*Encoding).Encode", Method, 0, ""}, + {"(*Encoding).EncodeToString", Method, 0, ""}, + {"(*Encoding).EncodedLen", Method, 0, ""}, + {"(CorruptInputError).Error", Method, 0, ""}, + {"(Encoding).Strict", Method, 8, ""}, + {"(Encoding).WithPadding", Method, 5, ""}, + {"CorruptInputError", Type, 0, ""}, + {"Encoding", Type, 0, ""}, + {"NewDecoder", Func, 0, "func(enc *Encoding, r io.Reader) io.Reader"}, + {"NewEncoder", Func, 0, "func(enc *Encoding, w io.Writer) io.WriteCloser"}, + {"NewEncoding", Func, 0, "func(encoder string) *Encoding"}, + {"NoPadding", Const, 5, ""}, + {"RawStdEncoding", Var, 5, ""}, + {"RawURLEncoding", Var, 5, ""}, + {"StdEncoding", Var, 0, ""}, + {"StdPadding", Const, 5, ""}, + {"URLEncoding", Var, 0, ""}, + }, + "encoding/binary": { + {"Append", Func, 23, "func(buf []byte, order ByteOrder, data any) ([]byte, error)"}, + {"AppendByteOrder", Type, 19, ""}, + {"AppendUvarint", Func, 19, "func(buf []byte, x uint64) []byte"}, + {"AppendVarint", Func, 19, "func(buf []byte, x int64) []byte"}, + {"BigEndian", Var, 0, ""}, + {"ByteOrder", Type, 0, ""}, + {"Decode", Func, 23, "func(buf []byte, order ByteOrder, data any) (int, error)"}, + {"Encode", Func, 23, "func(buf []byte, order ByteOrder, data any) (int, error)"}, + {"LittleEndian", Var, 0, ""}, + {"MaxVarintLen16", Const, 0, ""}, + {"MaxVarintLen32", Const, 0, ""}, + {"MaxVarintLen64", Const, 0, ""}, + {"NativeEndian", Var, 21, ""}, + {"PutUvarint", Func, 0, "func(buf []byte, x uint64) int"}, + {"PutVarint", Func, 0, "func(buf []byte, x int64) int"}, + {"Read", Func, 0, "func(r io.Reader, order ByteOrder, data any) error"}, + {"ReadUvarint", Func, 0, "func(r io.ByteReader) (uint64, error)"}, + {"ReadVarint", Func, 0, "func(r io.ByteReader) (int64, error)"}, + {"Size", Func, 0, "func(v any) int"}, + {"Uvarint", Func, 0, "func(buf []byte) (uint64, int)"}, + {"Varint", Func, 0, "func(buf []byte) (int64, int)"}, + {"Write", Func, 0, "func(w io.Writer, order ByteOrder, data any) error"}, + }, + "encoding/csv": { + {"(*ParseError).Error", Method, 0, ""}, + {"(*ParseError).Unwrap", Method, 13, ""}, + {"(*Reader).FieldPos", Method, 17, ""}, + {"(*Reader).InputOffset", Method, 19, ""}, + {"(*Reader).Read", Method, 0, ""}, + {"(*Reader).ReadAll", Method, 0, ""}, + {"(*Writer).Error", Method, 1, ""}, + {"(*Writer).Flush", Method, 0, ""}, + {"(*Writer).Write", Method, 0, ""}, + {"(*Writer).WriteAll", Method, 0, ""}, + {"ErrBareQuote", Var, 0, ""}, + {"ErrFieldCount", Var, 0, ""}, + {"ErrQuote", Var, 0, ""}, + {"ErrTrailingComma", Var, 0, ""}, + {"NewReader", Func, 0, "func(r io.Reader) *Reader"}, + {"NewWriter", Func, 0, "func(w io.Writer) *Writer"}, + {"ParseError", Type, 0, ""}, + {"ParseError.Column", Field, 0, ""}, + {"ParseError.Err", Field, 
0, ""}, + {"ParseError.Line", Field, 0, ""}, + {"ParseError.StartLine", Field, 10, ""}, + {"Reader", Type, 0, ""}, + {"Reader.Comma", Field, 0, ""}, + {"Reader.Comment", Field, 0, ""}, + {"Reader.FieldsPerRecord", Field, 0, ""}, + {"Reader.LazyQuotes", Field, 0, ""}, + {"Reader.ReuseRecord", Field, 9, ""}, + {"Reader.TrailingComma", Field, 0, ""}, + {"Reader.TrimLeadingSpace", Field, 0, ""}, + {"Writer", Type, 0, ""}, + {"Writer.Comma", Field, 0, ""}, + {"Writer.UseCRLF", Field, 0, ""}, + }, + "encoding/gob": { + {"(*Decoder).Decode", Method, 0, ""}, + {"(*Decoder).DecodeValue", Method, 0, ""}, + {"(*Encoder).Encode", Method, 0, ""}, + {"(*Encoder).EncodeValue", Method, 0, ""}, + {"CommonType", Type, 0, ""}, + {"CommonType.Id", Field, 0, ""}, + {"CommonType.Name", Field, 0, ""}, + {"Decoder", Type, 0, ""}, + {"Encoder", Type, 0, ""}, + {"GobDecoder", Type, 0, ""}, + {"GobEncoder", Type, 0, ""}, + {"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"}, + {"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"}, + {"Register", Func, 0, "func(value any)"}, + {"RegisterName", Func, 0, "func(name string, value any)"}, + }, + "encoding/hex": { + {"(InvalidByteError).Error", Method, 0, ""}, + {"AppendDecode", Func, 22, "func(dst []byte, src []byte) ([]byte, error)"}, + {"AppendEncode", Func, 22, "func(dst []byte, src []byte) []byte"}, + {"Decode", Func, 0, "func(dst []byte, src []byte) (int, error)"}, + {"DecodeString", Func, 0, "func(s string) ([]byte, error)"}, + {"DecodedLen", Func, 0, "func(x int) int"}, + {"Dump", Func, 0, "func(data []byte) string"}, + {"Dumper", Func, 0, "func(w io.Writer) io.WriteCloser"}, + {"Encode", Func, 0, "func(dst []byte, src []byte) int"}, + {"EncodeToString", Func, 0, "func(src []byte) string"}, + {"EncodedLen", Func, 0, "func(n int) int"}, + {"ErrLength", Var, 0, ""}, + {"InvalidByteError", Type, 0, ""}, + {"NewDecoder", Func, 10, "func(r io.Reader) io.Reader"}, + {"NewEncoder", Func, 10, "func(w io.Writer) io.Writer"}, + }, + "encoding/json": { + {"(*Decoder).Buffered", Method, 1, ""}, + {"(*Decoder).Decode", Method, 0, ""}, + {"(*Decoder).DisallowUnknownFields", Method, 10, ""}, + {"(*Decoder).InputOffset", Method, 14, ""}, + {"(*Decoder).More", Method, 5, ""}, + {"(*Decoder).Token", Method, 5, ""}, + {"(*Decoder).UseNumber", Method, 1, ""}, + {"(*Encoder).Encode", Method, 0, ""}, + {"(*Encoder).SetEscapeHTML", Method, 7, ""}, + {"(*Encoder).SetIndent", Method, 7, ""}, + {"(*InvalidUTF8Error).Error", Method, 0, ""}, + {"(*InvalidUnmarshalError).Error", Method, 0, ""}, + {"(*MarshalerError).Error", Method, 0, ""}, + {"(*MarshalerError).Unwrap", Method, 13, ""}, + {"(*RawMessage).MarshalJSON", Method, 0, ""}, + {"(*RawMessage).UnmarshalJSON", Method, 0, ""}, + {"(*SyntaxError).Error", Method, 0, ""}, + {"(*UnmarshalFieldError).Error", Method, 0, ""}, + {"(*UnmarshalTypeError).Error", Method, 0, ""}, + {"(*UnsupportedTypeError).Error", Method, 0, ""}, + {"(*UnsupportedValueError).Error", Method, 0, ""}, + {"(Delim).String", Method, 5, ""}, + {"(Number).Float64", Method, 1, ""}, + {"(Number).Int64", Method, 1, ""}, + {"(Number).String", Method, 1, ""}, + {"(RawMessage).MarshalJSON", Method, 8, ""}, + {"Compact", Func, 0, "func(dst *bytes.Buffer, src []byte) error"}, + {"Decoder", Type, 0, ""}, + {"Delim", Type, 5, ""}, + {"Encoder", Type, 0, ""}, + {"HTMLEscape", Func, 0, "func(dst *bytes.Buffer, src []byte)"}, + {"Indent", Func, 0, "func(dst *bytes.Buffer, src []byte, prefix string, indent string) error"}, + {"InvalidUTF8Error", Type, 0, ""}, + {"InvalidUTF8Error.S", 
Field, 0, ""}, + {"InvalidUnmarshalError", Type, 0, ""}, + {"InvalidUnmarshalError.Type", Field, 0, ""}, + {"Marshal", Func, 0, "func(v any) ([]byte, error)"}, + {"MarshalIndent", Func, 0, "func(v any, prefix string, indent string) ([]byte, error)"}, + {"Marshaler", Type, 0, ""}, + {"MarshalerError", Type, 0, ""}, + {"MarshalerError.Err", Field, 0, ""}, + {"MarshalerError.Type", Field, 0, ""}, + {"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"}, + {"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"}, + {"Number", Type, 1, ""}, + {"RawMessage", Type, 0, ""}, + {"SyntaxError", Type, 0, ""}, + {"SyntaxError.Offset", Field, 0, ""}, + {"Token", Type, 5, ""}, + {"Unmarshal", Func, 0, "func(data []byte, v any) error"}, + {"UnmarshalFieldError", Type, 0, ""}, + {"UnmarshalFieldError.Field", Field, 0, ""}, + {"UnmarshalFieldError.Key", Field, 0, ""}, + {"UnmarshalFieldError.Type", Field, 0, ""}, + {"UnmarshalTypeError", Type, 0, ""}, + {"UnmarshalTypeError.Field", Field, 8, ""}, + {"UnmarshalTypeError.Offset", Field, 5, ""}, + {"UnmarshalTypeError.Struct", Field, 8, ""}, + {"UnmarshalTypeError.Type", Field, 0, ""}, + {"UnmarshalTypeError.Value", Field, 0, ""}, + {"Unmarshaler", Type, 0, ""}, + {"UnsupportedTypeError", Type, 0, ""}, + {"UnsupportedTypeError.Type", Field, 0, ""}, + {"UnsupportedValueError", Type, 0, ""}, + {"UnsupportedValueError.Str", Field, 0, ""}, + {"UnsupportedValueError.Value", Field, 0, ""}, + {"Valid", Func, 9, "func(data []byte) bool"}, + }, + "encoding/pem": { + {"Block", Type, 0, ""}, + {"Block.Bytes", Field, 0, ""}, + {"Block.Headers", Field, 0, ""}, + {"Block.Type", Field, 0, ""}, + {"Decode", Func, 0, "func(data []byte) (p *Block, rest []byte)"}, + {"Encode", Func, 0, "func(out io.Writer, b *Block) error"}, + {"EncodeToMemory", Func, 0, "func(b *Block) []byte"}, + }, + "encoding/xml": { + {"(*Decoder).Decode", Method, 0, ""}, + {"(*Decoder).DecodeElement", Method, 0, ""}, + {"(*Decoder).InputOffset", Method, 4, ""}, + {"(*Decoder).InputPos", Method, 19, ""}, + {"(*Decoder).RawToken", Method, 0, ""}, + {"(*Decoder).Skip", Method, 0, ""}, + {"(*Decoder).Token", Method, 0, ""}, + {"(*Encoder).Close", Method, 20, ""}, + {"(*Encoder).Encode", Method, 0, ""}, + {"(*Encoder).EncodeElement", Method, 2, ""}, + {"(*Encoder).EncodeToken", Method, 2, ""}, + {"(*Encoder).Flush", Method, 2, ""}, + {"(*Encoder).Indent", Method, 1, ""}, + {"(*SyntaxError).Error", Method, 0, ""}, + {"(*TagPathError).Error", Method, 0, ""}, + {"(*UnsupportedTypeError).Error", Method, 0, ""}, + {"(CharData).Copy", Method, 0, ""}, + {"(Comment).Copy", Method, 0, ""}, + {"(Directive).Copy", Method, 0, ""}, + {"(ProcInst).Copy", Method, 0, ""}, + {"(StartElement).Copy", Method, 0, ""}, + {"(StartElement).End", Method, 2, ""}, + {"(UnmarshalError).Error", Method, 0, ""}, + {"Attr", Type, 0, ""}, + {"Attr.Name", Field, 0, ""}, + {"Attr.Value", Field, 0, ""}, + {"CharData", Type, 0, ""}, + {"Comment", Type, 0, ""}, + {"CopyToken", Func, 0, "func(t Token) Token"}, + {"Decoder", Type, 0, ""}, + {"Decoder.AutoClose", Field, 0, ""}, + {"Decoder.CharsetReader", Field, 0, ""}, + {"Decoder.DefaultSpace", Field, 1, ""}, + {"Decoder.Entity", Field, 0, ""}, + {"Decoder.Strict", Field, 0, ""}, + {"Directive", Type, 0, ""}, + {"Encoder", Type, 0, ""}, + {"EndElement", Type, 0, ""}, + {"EndElement.Name", Field, 0, ""}, + {"Escape", Func, 0, "func(w io.Writer, s []byte)"}, + {"EscapeText", Func, 1, "func(w io.Writer, s []byte) error"}, + {"HTMLAutoClose", Var, 0, ""}, + {"HTMLEntity", Var, 0, ""}, + {"Header", Const, 0, 
""}, + {"Marshal", Func, 0, "func(v any) ([]byte, error)"}, + {"MarshalIndent", Func, 0, "func(v any, prefix string, indent string) ([]byte, error)"}, + {"Marshaler", Type, 2, ""}, + {"MarshalerAttr", Type, 2, ""}, + {"Name", Type, 0, ""}, + {"Name.Local", Field, 0, ""}, + {"Name.Space", Field, 0, ""}, + {"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"}, + {"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"}, + {"NewTokenDecoder", Func, 10, "func(t TokenReader) *Decoder"}, + {"ProcInst", Type, 0, ""}, + {"ProcInst.Inst", Field, 0, ""}, + {"ProcInst.Target", Field, 0, ""}, + {"StartElement", Type, 0, ""}, + {"StartElement.Attr", Field, 0, ""}, + {"StartElement.Name", Field, 0, ""}, + {"SyntaxError", Type, 0, ""}, + {"SyntaxError.Line", Field, 0, ""}, + {"SyntaxError.Msg", Field, 0, ""}, + {"TagPathError", Type, 0, ""}, + {"TagPathError.Field1", Field, 0, ""}, + {"TagPathError.Field2", Field, 0, ""}, + {"TagPathError.Struct", Field, 0, ""}, + {"TagPathError.Tag1", Field, 0, ""}, + {"TagPathError.Tag2", Field, 0, ""}, + {"Token", Type, 0, ""}, + {"TokenReader", Type, 10, ""}, + {"Unmarshal", Func, 0, "func(data []byte, v any) error"}, + {"UnmarshalError", Type, 0, ""}, + {"Unmarshaler", Type, 2, ""}, + {"UnmarshalerAttr", Type, 2, ""}, + {"UnsupportedTypeError", Type, 0, ""}, + {"UnsupportedTypeError.Type", Field, 0, ""}, + }, + "errors": { + {"As", Func, 13, "func(err error, target any) bool"}, + {"ErrUnsupported", Var, 21, ""}, + {"Is", Func, 13, "func(err error, target error) bool"}, + {"Join", Func, 20, "func(errs ...error) error"}, + {"New", Func, 0, "func(text string) error"}, + {"Unwrap", Func, 13, "func(err error) error"}, + }, + "expvar": { + {"(*Float).Add", Method, 0, ""}, + {"(*Float).Set", Method, 0, ""}, + {"(*Float).String", Method, 0, ""}, + {"(*Float).Value", Method, 8, ""}, + {"(*Int).Add", Method, 0, ""}, + {"(*Int).Set", Method, 0, ""}, + {"(*Int).String", Method, 0, ""}, + {"(*Int).Value", Method, 8, ""}, + {"(*Map).Add", Method, 0, ""}, + {"(*Map).AddFloat", Method, 0, ""}, + {"(*Map).Delete", Method, 12, ""}, + {"(*Map).Do", Method, 0, ""}, + {"(*Map).Get", Method, 0, ""}, + {"(*Map).Init", Method, 0, ""}, + {"(*Map).Set", Method, 0, ""}, + {"(*Map).String", Method, 0, ""}, + {"(*String).Set", Method, 0, ""}, + {"(*String).String", Method, 0, ""}, + {"(*String).Value", Method, 8, ""}, + {"(Func).String", Method, 0, ""}, + {"(Func).Value", Method, 8, ""}, + {"Do", Func, 0, "func(f func(KeyValue))"}, + {"Float", Type, 0, ""}, + {"Func", Type, 0, ""}, + {"Get", Func, 0, "func(name string) Var"}, + {"Handler", Func, 8, "func() http.Handler"}, + {"Int", Type, 0, ""}, + {"KeyValue", Type, 0, ""}, + {"KeyValue.Key", Field, 0, ""}, + {"KeyValue.Value", Field, 0, ""}, + {"Map", Type, 0, ""}, + {"NewFloat", Func, 0, "func(name string) *Float"}, + {"NewInt", Func, 0, "func(name string) *Int"}, + {"NewMap", Func, 0, "func(name string) *Map"}, + {"NewString", Func, 0, "func(name string) *String"}, + {"Publish", Func, 0, "func(name string, v Var)"}, + {"String", Type, 0, ""}, + {"Var", Type, 0, ""}, + }, + "flag": { + {"(*FlagSet).Arg", Method, 0, ""}, + {"(*FlagSet).Args", Method, 0, ""}, + {"(*FlagSet).Bool", Method, 0, ""}, + {"(*FlagSet).BoolFunc", Method, 21, ""}, + {"(*FlagSet).BoolVar", Method, 0, ""}, + {"(*FlagSet).Duration", Method, 0, ""}, + {"(*FlagSet).DurationVar", Method, 0, ""}, + {"(*FlagSet).ErrorHandling", Method, 10, ""}, + {"(*FlagSet).Float64", Method, 0, ""}, + {"(*FlagSet).Float64Var", Method, 0, ""}, + {"(*FlagSet).Func", Method, 16, ""}, + 
{"(*FlagSet).Init", Method, 0, ""}, + {"(*FlagSet).Int", Method, 0, ""}, + {"(*FlagSet).Int64", Method, 0, ""}, + {"(*FlagSet).Int64Var", Method, 0, ""}, + {"(*FlagSet).IntVar", Method, 0, ""}, + {"(*FlagSet).Lookup", Method, 0, ""}, + {"(*FlagSet).NArg", Method, 0, ""}, + {"(*FlagSet).NFlag", Method, 0, ""}, + {"(*FlagSet).Name", Method, 10, ""}, + {"(*FlagSet).Output", Method, 10, ""}, + {"(*FlagSet).Parse", Method, 0, ""}, + {"(*FlagSet).Parsed", Method, 0, ""}, + {"(*FlagSet).PrintDefaults", Method, 0, ""}, + {"(*FlagSet).Set", Method, 0, ""}, + {"(*FlagSet).SetOutput", Method, 0, ""}, + {"(*FlagSet).String", Method, 0, ""}, + {"(*FlagSet).StringVar", Method, 0, ""}, + {"(*FlagSet).TextVar", Method, 19, ""}, + {"(*FlagSet).Uint", Method, 0, ""}, + {"(*FlagSet).Uint64", Method, 0, ""}, + {"(*FlagSet).Uint64Var", Method, 0, ""}, + {"(*FlagSet).UintVar", Method, 0, ""}, + {"(*FlagSet).Var", Method, 0, ""}, + {"(*FlagSet).Visit", Method, 0, ""}, + {"(*FlagSet).VisitAll", Method, 0, ""}, + {"Arg", Func, 0, "func(i int) string"}, + {"Args", Func, 0, "func() []string"}, + {"Bool", Func, 0, "func(name string, value bool, usage string) *bool"}, + {"BoolFunc", Func, 21, "func(name string, usage string, fn func(string) error)"}, + {"BoolVar", Func, 0, "func(p *bool, name string, value bool, usage string)"}, + {"CommandLine", Var, 2, ""}, + {"ContinueOnError", Const, 0, ""}, + {"Duration", Func, 0, "func(name string, value time.Duration, usage string) *time.Duration"}, + {"DurationVar", Func, 0, "func(p *time.Duration, name string, value time.Duration, usage string)"}, + {"ErrHelp", Var, 0, ""}, + {"ErrorHandling", Type, 0, ""}, + {"ExitOnError", Const, 0, ""}, + {"Flag", Type, 0, ""}, + {"Flag.DefValue", Field, 0, ""}, + {"Flag.Name", Field, 0, ""}, + {"Flag.Usage", Field, 0, ""}, + {"Flag.Value", Field, 0, ""}, + {"FlagSet", Type, 0, ""}, + {"FlagSet.Usage", Field, 0, ""}, + {"Float64", Func, 0, "func(name string, value float64, usage string) *float64"}, + {"Float64Var", Func, 0, "func(p *float64, name string, value float64, usage string)"}, + {"Func", Func, 16, "func(name string, usage string, fn func(string) error)"}, + {"Getter", Type, 2, ""}, + {"Int", Func, 0, "func(name string, value int, usage string) *int"}, + {"Int64", Func, 0, "func(name string, value int64, usage string) *int64"}, + {"Int64Var", Func, 0, "func(p *int64, name string, value int64, usage string)"}, + {"IntVar", Func, 0, "func(p *int, name string, value int, usage string)"}, + {"Lookup", Func, 0, "func(name string) *Flag"}, + {"NArg", Func, 0, "func() int"}, + {"NFlag", Func, 0, "func() int"}, + {"NewFlagSet", Func, 0, "func(name string, errorHandling ErrorHandling) *FlagSet"}, + {"PanicOnError", Const, 0, ""}, + {"Parse", Func, 0, "func()"}, + {"Parsed", Func, 0, "func() bool"}, + {"PrintDefaults", Func, 0, "func()"}, + {"Set", Func, 0, "func(name string, value string) error"}, + {"String", Func, 0, "func(name string, value string, usage string) *string"}, + {"StringVar", Func, 0, "func(p *string, name string, value string, usage string)"}, + {"TextVar", Func, 19, "func(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string)"}, + {"Uint", Func, 0, "func(name string, value uint, usage string) *uint"}, + {"Uint64", Func, 0, "func(name string, value uint64, usage string) *uint64"}, + {"Uint64Var", Func, 0, "func(p *uint64, name string, value uint64, usage string)"}, + {"UintVar", Func, 0, "func(p *uint, name string, value uint, usage string)"}, + {"UnquoteUsage", Func, 5, "func(flag *Flag) (name 
string, usage string)"}, + {"Usage", Var, 0, ""}, + {"Value", Type, 0, ""}, + {"Var", Func, 0, "func(value Value, name string, usage string)"}, + {"Visit", Func, 0, "func(fn func(*Flag))"}, + {"VisitAll", Func, 0, "func(fn func(*Flag))"}, + }, + "fmt": { + {"Append", Func, 19, "func(b []byte, a ...any) []byte"}, + {"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"}, + {"Appendln", Func, 19, "func(b []byte, a ...any) []byte"}, + {"Errorf", Func, 0, "func(format string, a ...any) error"}, + {"FormatString", Func, 20, "func(state State, verb rune) string"}, + {"Formatter", Type, 0, ""}, + {"Fprint", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"}, + {"Fprintf", Func, 0, "func(w io.Writer, format string, a ...any) (n int, err error)"}, + {"Fprintln", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"}, + {"Fscan", Func, 0, "func(r io.Reader, a ...any) (n int, err error)"}, + {"Fscanf", Func, 0, "func(r io.Reader, format string, a ...any) (n int, err error)"}, + {"Fscanln", Func, 0, "func(r io.Reader, a ...any) (n int, err error)"}, + {"GoStringer", Type, 0, ""}, + {"Print", Func, 0, "func(a ...any) (n int, err error)"}, + {"Printf", Func, 0, "func(format string, a ...any) (n int, err error)"}, + {"Println", Func, 0, "func(a ...any) (n int, err error)"}, + {"Scan", Func, 0, "func(a ...any) (n int, err error)"}, + {"ScanState", Type, 0, ""}, + {"Scanf", Func, 0, "func(format string, a ...any) (n int, err error)"}, + {"Scanln", Func, 0, "func(a ...any) (n int, err error)"}, + {"Scanner", Type, 0, ""}, + {"Sprint", Func, 0, "func(a ...any) string"}, + {"Sprintf", Func, 0, "func(format string, a ...any) string"}, + {"Sprintln", Func, 0, "func(a ...any) string"}, + {"Sscan", Func, 0, "func(str string, a ...any) (n int, err error)"}, + {"Sscanf", Func, 0, "func(str string, format string, a ...any) (n int, err error)"}, + {"Sscanln", Func, 0, "func(str string, a ...any) (n int, err error)"}, + {"State", Type, 0, ""}, + {"Stringer", Type, 0, ""}, + }, + "go/ast": { + {"(*ArrayType).End", Method, 0, ""}, + {"(*ArrayType).Pos", Method, 0, ""}, + {"(*AssignStmt).End", Method, 0, ""}, + {"(*AssignStmt).Pos", Method, 0, ""}, + {"(*BadDecl).End", Method, 0, ""}, + {"(*BadDecl).Pos", Method, 0, ""}, + {"(*BadExpr).End", Method, 0, ""}, + {"(*BadExpr).Pos", Method, 0, ""}, + {"(*BadStmt).End", Method, 0, ""}, + {"(*BadStmt).Pos", Method, 0, ""}, + {"(*BasicLit).End", Method, 0, ""}, + {"(*BasicLit).Pos", Method, 0, ""}, + {"(*BinaryExpr).End", Method, 0, ""}, + {"(*BinaryExpr).Pos", Method, 0, ""}, + {"(*BlockStmt).End", Method, 0, ""}, + {"(*BlockStmt).Pos", Method, 0, ""}, + {"(*BranchStmt).End", Method, 0, ""}, + {"(*BranchStmt).Pos", Method, 0, ""}, + {"(*CallExpr).End", Method, 0, ""}, + {"(*CallExpr).Pos", Method, 0, ""}, + {"(*CaseClause).End", Method, 0, ""}, + {"(*CaseClause).Pos", Method, 0, ""}, + {"(*ChanType).End", Method, 0, ""}, + {"(*ChanType).Pos", Method, 0, ""}, + {"(*CommClause).End", Method, 0, ""}, + {"(*CommClause).Pos", Method, 0, ""}, + {"(*Comment).End", Method, 0, ""}, + {"(*Comment).Pos", Method, 0, ""}, + {"(*CommentGroup).End", Method, 0, ""}, + {"(*CommentGroup).Pos", Method, 0, ""}, + {"(*CommentGroup).Text", Method, 0, ""}, + {"(*CompositeLit).End", Method, 0, ""}, + {"(*CompositeLit).Pos", Method, 0, ""}, + {"(*DeclStmt).End", Method, 0, ""}, + {"(*DeclStmt).Pos", Method, 0, ""}, + {"(*DeferStmt).End", Method, 0, ""}, + {"(*DeferStmt).Pos", Method, 0, ""}, + {"(*Ellipsis).End", Method, 0, ""}, + {"(*Ellipsis).Pos", Method, 0, ""}, + 
{"(*EmptyStmt).End", Method, 0, ""}, + {"(*EmptyStmt).Pos", Method, 0, ""}, + {"(*ExprStmt).End", Method, 0, ""}, + {"(*ExprStmt).Pos", Method, 0, ""}, + {"(*Field).End", Method, 0, ""}, + {"(*Field).Pos", Method, 0, ""}, + {"(*FieldList).End", Method, 0, ""}, + {"(*FieldList).NumFields", Method, 0, ""}, + {"(*FieldList).Pos", Method, 0, ""}, + {"(*File).End", Method, 0, ""}, + {"(*File).Pos", Method, 0, ""}, + {"(*ForStmt).End", Method, 0, ""}, + {"(*ForStmt).Pos", Method, 0, ""}, + {"(*FuncDecl).End", Method, 0, ""}, + {"(*FuncDecl).Pos", Method, 0, ""}, + {"(*FuncLit).End", Method, 0, ""}, + {"(*FuncLit).Pos", Method, 0, ""}, + {"(*FuncType).End", Method, 0, ""}, + {"(*FuncType).Pos", Method, 0, ""}, + {"(*GenDecl).End", Method, 0, ""}, + {"(*GenDecl).Pos", Method, 0, ""}, + {"(*GoStmt).End", Method, 0, ""}, + {"(*GoStmt).Pos", Method, 0, ""}, + {"(*Ident).End", Method, 0, ""}, + {"(*Ident).IsExported", Method, 0, ""}, + {"(*Ident).Pos", Method, 0, ""}, + {"(*Ident).String", Method, 0, ""}, + {"(*IfStmt).End", Method, 0, ""}, + {"(*IfStmt).Pos", Method, 0, ""}, + {"(*ImportSpec).End", Method, 0, ""}, + {"(*ImportSpec).Pos", Method, 0, ""}, + {"(*IncDecStmt).End", Method, 0, ""}, + {"(*IncDecStmt).Pos", Method, 0, ""}, + {"(*IndexExpr).End", Method, 0, ""}, + {"(*IndexExpr).Pos", Method, 0, ""}, + {"(*IndexListExpr).End", Method, 18, ""}, + {"(*IndexListExpr).Pos", Method, 18, ""}, + {"(*InterfaceType).End", Method, 0, ""}, + {"(*InterfaceType).Pos", Method, 0, ""}, + {"(*KeyValueExpr).End", Method, 0, ""}, + {"(*KeyValueExpr).Pos", Method, 0, ""}, + {"(*LabeledStmt).End", Method, 0, ""}, + {"(*LabeledStmt).Pos", Method, 0, ""}, + {"(*MapType).End", Method, 0, ""}, + {"(*MapType).Pos", Method, 0, ""}, + {"(*Object).Pos", Method, 0, ""}, + {"(*Package).End", Method, 0, ""}, + {"(*Package).Pos", Method, 0, ""}, + {"(*ParenExpr).End", Method, 0, ""}, + {"(*ParenExpr).Pos", Method, 0, ""}, + {"(*RangeStmt).End", Method, 0, ""}, + {"(*RangeStmt).Pos", Method, 0, ""}, + {"(*ReturnStmt).End", Method, 0, ""}, + {"(*ReturnStmt).Pos", Method, 0, ""}, + {"(*Scope).Insert", Method, 0, ""}, + {"(*Scope).Lookup", Method, 0, ""}, + {"(*Scope).String", Method, 0, ""}, + {"(*SelectStmt).End", Method, 0, ""}, + {"(*SelectStmt).Pos", Method, 0, ""}, + {"(*SelectorExpr).End", Method, 0, ""}, + {"(*SelectorExpr).Pos", Method, 0, ""}, + {"(*SendStmt).End", Method, 0, ""}, + {"(*SendStmt).Pos", Method, 0, ""}, + {"(*SliceExpr).End", Method, 0, ""}, + {"(*SliceExpr).Pos", Method, 0, ""}, + {"(*StarExpr).End", Method, 0, ""}, + {"(*StarExpr).Pos", Method, 0, ""}, + {"(*StructType).End", Method, 0, ""}, + {"(*StructType).Pos", Method, 0, ""}, + {"(*SwitchStmt).End", Method, 0, ""}, + {"(*SwitchStmt).Pos", Method, 0, ""}, + {"(*TypeAssertExpr).End", Method, 0, ""}, + {"(*TypeAssertExpr).Pos", Method, 0, ""}, + {"(*TypeSpec).End", Method, 0, ""}, + {"(*TypeSpec).Pos", Method, 0, ""}, + {"(*TypeSwitchStmt).End", Method, 0, ""}, + {"(*TypeSwitchStmt).Pos", Method, 0, ""}, + {"(*UnaryExpr).End", Method, 0, ""}, + {"(*UnaryExpr).Pos", Method, 0, ""}, + {"(*ValueSpec).End", Method, 0, ""}, + {"(*ValueSpec).Pos", Method, 0, ""}, + {"(CommentMap).Comments", Method, 1, ""}, + {"(CommentMap).Filter", Method, 1, ""}, + {"(CommentMap).String", Method, 1, ""}, + {"(CommentMap).Update", Method, 1, ""}, + {"(ObjKind).String", Method, 0, ""}, + {"ArrayType", Type, 0, ""}, + {"ArrayType.Elt", Field, 0, ""}, + {"ArrayType.Lbrack", Field, 0, ""}, + {"ArrayType.Len", Field, 0, ""}, + {"AssignStmt", Type, 0, ""}, + {"AssignStmt.Lhs", 
Field, 0, ""}, + {"AssignStmt.Rhs", Field, 0, ""}, + {"AssignStmt.Tok", Field, 0, ""}, + {"AssignStmt.TokPos", Field, 0, ""}, + {"Bad", Const, 0, ""}, + {"BadDecl", Type, 0, ""}, + {"BadDecl.From", Field, 0, ""}, + {"BadDecl.To", Field, 0, ""}, + {"BadExpr", Type, 0, ""}, + {"BadExpr.From", Field, 0, ""}, + {"BadExpr.To", Field, 0, ""}, + {"BadStmt", Type, 0, ""}, + {"BadStmt.From", Field, 0, ""}, + {"BadStmt.To", Field, 0, ""}, + {"BasicLit", Type, 0, ""}, + {"BasicLit.Kind", Field, 0, ""}, + {"BasicLit.Value", Field, 0, ""}, + {"BasicLit.ValuePos", Field, 0, ""}, + {"BinaryExpr", Type, 0, ""}, + {"BinaryExpr.Op", Field, 0, ""}, + {"BinaryExpr.OpPos", Field, 0, ""}, + {"BinaryExpr.X", Field, 0, ""}, + {"BinaryExpr.Y", Field, 0, ""}, + {"BlockStmt", Type, 0, ""}, + {"BlockStmt.Lbrace", Field, 0, ""}, + {"BlockStmt.List", Field, 0, ""}, + {"BlockStmt.Rbrace", Field, 0, ""}, + {"BranchStmt", Type, 0, ""}, + {"BranchStmt.Label", Field, 0, ""}, + {"BranchStmt.Tok", Field, 0, ""}, + {"BranchStmt.TokPos", Field, 0, ""}, + {"CallExpr", Type, 0, ""}, + {"CallExpr.Args", Field, 0, ""}, + {"CallExpr.Ellipsis", Field, 0, ""}, + {"CallExpr.Fun", Field, 0, ""}, + {"CallExpr.Lparen", Field, 0, ""}, + {"CallExpr.Rparen", Field, 0, ""}, + {"CaseClause", Type, 0, ""}, + {"CaseClause.Body", Field, 0, ""}, + {"CaseClause.Case", Field, 0, ""}, + {"CaseClause.Colon", Field, 0, ""}, + {"CaseClause.List", Field, 0, ""}, + {"ChanDir", Type, 0, ""}, + {"ChanType", Type, 0, ""}, + {"ChanType.Arrow", Field, 1, ""}, + {"ChanType.Begin", Field, 0, ""}, + {"ChanType.Dir", Field, 0, ""}, + {"ChanType.Value", Field, 0, ""}, + {"CommClause", Type, 0, ""}, + {"CommClause.Body", Field, 0, ""}, + {"CommClause.Case", Field, 0, ""}, + {"CommClause.Colon", Field, 0, ""}, + {"CommClause.Comm", Field, 0, ""}, + {"Comment", Type, 0, ""}, + {"Comment.Slash", Field, 0, ""}, + {"Comment.Text", Field, 0, ""}, + {"CommentGroup", Type, 0, ""}, + {"CommentGroup.List", Field, 0, ""}, + {"CommentMap", Type, 1, ""}, + {"CompositeLit", Type, 0, ""}, + {"CompositeLit.Elts", Field, 0, ""}, + {"CompositeLit.Incomplete", Field, 11, ""}, + {"CompositeLit.Lbrace", Field, 0, ""}, + {"CompositeLit.Rbrace", Field, 0, ""}, + {"CompositeLit.Type", Field, 0, ""}, + {"Con", Const, 0, ""}, + {"Decl", Type, 0, ""}, + {"DeclStmt", Type, 0, ""}, + {"DeclStmt.Decl", Field, 0, ""}, + {"DeferStmt", Type, 0, ""}, + {"DeferStmt.Call", Field, 0, ""}, + {"DeferStmt.Defer", Field, 0, ""}, + {"Ellipsis", Type, 0, ""}, + {"Ellipsis.Ellipsis", Field, 0, ""}, + {"Ellipsis.Elt", Field, 0, ""}, + {"EmptyStmt", Type, 0, ""}, + {"EmptyStmt.Implicit", Field, 5, ""}, + {"EmptyStmt.Semicolon", Field, 0, ""}, + {"Expr", Type, 0, ""}, + {"ExprStmt", Type, 0, ""}, + {"ExprStmt.X", Field, 0, ""}, + {"Field", Type, 0, ""}, + {"Field.Comment", Field, 0, ""}, + {"Field.Doc", Field, 0, ""}, + {"Field.Names", Field, 0, ""}, + {"Field.Tag", Field, 0, ""}, + {"Field.Type", Field, 0, ""}, + {"FieldFilter", Type, 0, ""}, + {"FieldList", Type, 0, ""}, + {"FieldList.Closing", Field, 0, ""}, + {"FieldList.List", Field, 0, ""}, + {"FieldList.Opening", Field, 0, ""}, + {"File", Type, 0, ""}, + {"File.Comments", Field, 0, ""}, + {"File.Decls", Field, 0, ""}, + {"File.Doc", Field, 0, ""}, + {"File.FileEnd", Field, 20, ""}, + {"File.FileStart", Field, 20, ""}, + {"File.GoVersion", Field, 21, ""}, + {"File.Imports", Field, 0, ""}, + {"File.Name", Field, 0, ""}, + {"File.Package", Field, 0, ""}, + {"File.Scope", Field, 0, ""}, + {"File.Unresolved", Field, 0, ""}, + {"FileExports", Func, 0, "func(src 
*File) bool"}, + {"Filter", Type, 0, ""}, + {"FilterDecl", Func, 0, "func(decl Decl, f Filter) bool"}, + {"FilterFile", Func, 0, "func(src *File, f Filter) bool"}, + {"FilterFuncDuplicates", Const, 0, ""}, + {"FilterImportDuplicates", Const, 0, ""}, + {"FilterPackage", Func, 0, "func(pkg *Package, f Filter) bool"}, + {"FilterUnassociatedComments", Const, 0, ""}, + {"ForStmt", Type, 0, ""}, + {"ForStmt.Body", Field, 0, ""}, + {"ForStmt.Cond", Field, 0, ""}, + {"ForStmt.For", Field, 0, ""}, + {"ForStmt.Init", Field, 0, ""}, + {"ForStmt.Post", Field, 0, ""}, + {"Fprint", Func, 0, "func(w io.Writer, fset *token.FileSet, x any, f FieldFilter) error"}, + {"Fun", Const, 0, ""}, + {"FuncDecl", Type, 0, ""}, + {"FuncDecl.Body", Field, 0, ""}, + {"FuncDecl.Doc", Field, 0, ""}, + {"FuncDecl.Name", Field, 0, ""}, + {"FuncDecl.Recv", Field, 0, ""}, + {"FuncDecl.Type", Field, 0, ""}, + {"FuncLit", Type, 0, ""}, + {"FuncLit.Body", Field, 0, ""}, + {"FuncLit.Type", Field, 0, ""}, + {"FuncType", Type, 0, ""}, + {"FuncType.Func", Field, 0, ""}, + {"FuncType.Params", Field, 0, ""}, + {"FuncType.Results", Field, 0, ""}, + {"FuncType.TypeParams", Field, 18, ""}, + {"GenDecl", Type, 0, ""}, + {"GenDecl.Doc", Field, 0, ""}, + {"GenDecl.Lparen", Field, 0, ""}, + {"GenDecl.Rparen", Field, 0, ""}, + {"GenDecl.Specs", Field, 0, ""}, + {"GenDecl.Tok", Field, 0, ""}, + {"GenDecl.TokPos", Field, 0, ""}, + {"GoStmt", Type, 0, ""}, + {"GoStmt.Call", Field, 0, ""}, + {"GoStmt.Go", Field, 0, ""}, + {"Ident", Type, 0, ""}, + {"Ident.Name", Field, 0, ""}, + {"Ident.NamePos", Field, 0, ""}, + {"Ident.Obj", Field, 0, ""}, + {"IfStmt", Type, 0, ""}, + {"IfStmt.Body", Field, 0, ""}, + {"IfStmt.Cond", Field, 0, ""}, + {"IfStmt.Else", Field, 0, ""}, + {"IfStmt.If", Field, 0, ""}, + {"IfStmt.Init", Field, 0, ""}, + {"ImportSpec", Type, 0, ""}, + {"ImportSpec.Comment", Field, 0, ""}, + {"ImportSpec.Doc", Field, 0, ""}, + {"ImportSpec.EndPos", Field, 0, ""}, + {"ImportSpec.Name", Field, 0, ""}, + {"ImportSpec.Path", Field, 0, ""}, + {"Importer", Type, 0, ""}, + {"IncDecStmt", Type, 0, ""}, + {"IncDecStmt.Tok", Field, 0, ""}, + {"IncDecStmt.TokPos", Field, 0, ""}, + {"IncDecStmt.X", Field, 0, ""}, + {"IndexExpr", Type, 0, ""}, + {"IndexExpr.Index", Field, 0, ""}, + {"IndexExpr.Lbrack", Field, 0, ""}, + {"IndexExpr.Rbrack", Field, 0, ""}, + {"IndexExpr.X", Field, 0, ""}, + {"IndexListExpr", Type, 18, ""}, + {"IndexListExpr.Indices", Field, 18, ""}, + {"IndexListExpr.Lbrack", Field, 18, ""}, + {"IndexListExpr.Rbrack", Field, 18, ""}, + {"IndexListExpr.X", Field, 18, ""}, + {"Inspect", Func, 0, "func(node Node, f func(Node) bool)"}, + {"InterfaceType", Type, 0, ""}, + {"InterfaceType.Incomplete", Field, 0, ""}, + {"InterfaceType.Interface", Field, 0, ""}, + {"InterfaceType.Methods", Field, 0, ""}, + {"IsExported", Func, 0, "func(name string) bool"}, + {"IsGenerated", Func, 21, "func(file *File) bool"}, + {"KeyValueExpr", Type, 0, ""}, + {"KeyValueExpr.Colon", Field, 0, ""}, + {"KeyValueExpr.Key", Field, 0, ""}, + {"KeyValueExpr.Value", Field, 0, ""}, + {"LabeledStmt", Type, 0, ""}, + {"LabeledStmt.Colon", Field, 0, ""}, + {"LabeledStmt.Label", Field, 0, ""}, + {"LabeledStmt.Stmt", Field, 0, ""}, + {"Lbl", Const, 0, ""}, + {"MapType", Type, 0, ""}, + {"MapType.Key", Field, 0, ""}, + {"MapType.Map", Field, 0, ""}, + {"MapType.Value", Field, 0, ""}, + {"MergeMode", Type, 0, ""}, + {"MergePackageFiles", Func, 0, "func(pkg *Package, mode MergeMode) *File"}, + {"NewCommentMap", Func, 1, "func(fset *token.FileSet, node Node, comments 
[]*CommentGroup) CommentMap"}, + {"NewIdent", Func, 0, "func(name string) *Ident"}, + {"NewObj", Func, 0, "func(kind ObjKind, name string) *Object"}, + {"NewPackage", Func, 0, "func(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, error)"}, + {"NewScope", Func, 0, "func(outer *Scope) *Scope"}, + {"Node", Type, 0, ""}, + {"NotNilFilter", Func, 0, "func(_ string, v reflect.Value) bool"}, + {"ObjKind", Type, 0, ""}, + {"Object", Type, 0, ""}, + {"Object.Data", Field, 0, ""}, + {"Object.Decl", Field, 0, ""}, + {"Object.Kind", Field, 0, ""}, + {"Object.Name", Field, 0, ""}, + {"Object.Type", Field, 0, ""}, + {"Package", Type, 0, ""}, + {"Package.Files", Field, 0, ""}, + {"Package.Imports", Field, 0, ""}, + {"Package.Name", Field, 0, ""}, + {"Package.Scope", Field, 0, ""}, + {"PackageExports", Func, 0, "func(pkg *Package) bool"}, + {"ParenExpr", Type, 0, ""}, + {"ParenExpr.Lparen", Field, 0, ""}, + {"ParenExpr.Rparen", Field, 0, ""}, + {"ParenExpr.X", Field, 0, ""}, + {"Pkg", Const, 0, ""}, + {"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"}, + {"Print", Func, 0, "func(fset *token.FileSet, x any) error"}, + {"RECV", Const, 0, ""}, + {"RangeStmt", Type, 0, ""}, + {"RangeStmt.Body", Field, 0, ""}, + {"RangeStmt.For", Field, 0, ""}, + {"RangeStmt.Key", Field, 0, ""}, + {"RangeStmt.Range", Field, 20, ""}, + {"RangeStmt.Tok", Field, 0, ""}, + {"RangeStmt.TokPos", Field, 0, ""}, + {"RangeStmt.Value", Field, 0, ""}, + {"RangeStmt.X", Field, 0, ""}, + {"ReturnStmt", Type, 0, ""}, + {"ReturnStmt.Results", Field, 0, ""}, + {"ReturnStmt.Return", Field, 0, ""}, + {"SEND", Const, 0, ""}, + {"Scope", Type, 0, ""}, + {"Scope.Objects", Field, 0, ""}, + {"Scope.Outer", Field, 0, ""}, + {"SelectStmt", Type, 0, ""}, + {"SelectStmt.Body", Field, 0, ""}, + {"SelectStmt.Select", Field, 0, ""}, + {"SelectorExpr", Type, 0, ""}, + {"SelectorExpr.Sel", Field, 0, ""}, + {"SelectorExpr.X", Field, 0, ""}, + {"SendStmt", Type, 0, ""}, + {"SendStmt.Arrow", Field, 0, ""}, + {"SendStmt.Chan", Field, 0, ""}, + {"SendStmt.Value", Field, 0, ""}, + {"SliceExpr", Type, 0, ""}, + {"SliceExpr.High", Field, 0, ""}, + {"SliceExpr.Lbrack", Field, 0, ""}, + {"SliceExpr.Low", Field, 0, ""}, + {"SliceExpr.Max", Field, 2, ""}, + {"SliceExpr.Rbrack", Field, 0, ""}, + {"SliceExpr.Slice3", Field, 2, ""}, + {"SliceExpr.X", Field, 0, ""}, + {"SortImports", Func, 0, "func(fset *token.FileSet, f *File)"}, + {"Spec", Type, 0, ""}, + {"StarExpr", Type, 0, ""}, + {"StarExpr.Star", Field, 0, ""}, + {"StarExpr.X", Field, 0, ""}, + {"Stmt", Type, 0, ""}, + {"StructType", Type, 0, ""}, + {"StructType.Fields", Field, 0, ""}, + {"StructType.Incomplete", Field, 0, ""}, + {"StructType.Struct", Field, 0, ""}, + {"SwitchStmt", Type, 0, ""}, + {"SwitchStmt.Body", Field, 0, ""}, + {"SwitchStmt.Init", Field, 0, ""}, + {"SwitchStmt.Switch", Field, 0, ""}, + {"SwitchStmt.Tag", Field, 0, ""}, + {"Typ", Const, 0, ""}, + {"TypeAssertExpr", Type, 0, ""}, + {"TypeAssertExpr.Lparen", Field, 2, ""}, + {"TypeAssertExpr.Rparen", Field, 2, ""}, + {"TypeAssertExpr.Type", Field, 0, ""}, + {"TypeAssertExpr.X", Field, 0, ""}, + {"TypeSpec", Type, 0, ""}, + {"TypeSpec.Assign", Field, 9, ""}, + {"TypeSpec.Comment", Field, 0, ""}, + {"TypeSpec.Doc", Field, 0, ""}, + {"TypeSpec.Name", Field, 0, ""}, + {"TypeSpec.Type", Field, 0, ""}, + {"TypeSpec.TypeParams", Field, 18, ""}, + {"TypeSwitchStmt", Type, 0, ""}, + {"TypeSwitchStmt.Assign", Field, 0, ""}, + {"TypeSwitchStmt.Body", Field, 0, ""}, + {"TypeSwitchStmt.Init", Field, 0, ""}, + 
{"TypeSwitchStmt.Switch", Field, 0, ""}, + {"UnaryExpr", Type, 0, ""}, + {"UnaryExpr.Op", Field, 0, ""}, + {"UnaryExpr.OpPos", Field, 0, ""}, + {"UnaryExpr.X", Field, 0, ""}, + {"Unparen", Func, 22, "func(e Expr) Expr"}, + {"ValueSpec", Type, 0, ""}, + {"ValueSpec.Comment", Field, 0, ""}, + {"ValueSpec.Doc", Field, 0, ""}, + {"ValueSpec.Names", Field, 0, ""}, + {"ValueSpec.Type", Field, 0, ""}, + {"ValueSpec.Values", Field, 0, ""}, + {"Var", Const, 0, ""}, + {"Visitor", Type, 0, ""}, + {"Walk", Func, 0, "func(v Visitor, node Node)"}, + }, + "go/build": { + {"(*Context).Import", Method, 0, ""}, + {"(*Context).ImportDir", Method, 0, ""}, + {"(*Context).MatchFile", Method, 2, ""}, + {"(*Context).SrcDirs", Method, 0, ""}, + {"(*MultiplePackageError).Error", Method, 4, ""}, + {"(*NoGoError).Error", Method, 0, ""}, + {"(*Package).IsCommand", Method, 0, ""}, + {"AllowBinary", Const, 0, ""}, + {"ArchChar", Func, 0, "func(goarch string) (string, error)"}, + {"Context", Type, 0, ""}, + {"Context.BuildTags", Field, 0, ""}, + {"Context.CgoEnabled", Field, 0, ""}, + {"Context.Compiler", Field, 0, ""}, + {"Context.Dir", Field, 14, ""}, + {"Context.GOARCH", Field, 0, ""}, + {"Context.GOOS", Field, 0, ""}, + {"Context.GOPATH", Field, 0, ""}, + {"Context.GOROOT", Field, 0, ""}, + {"Context.HasSubdir", Field, 0, ""}, + {"Context.InstallSuffix", Field, 1, ""}, + {"Context.IsAbsPath", Field, 0, ""}, + {"Context.IsDir", Field, 0, ""}, + {"Context.JoinPath", Field, 0, ""}, + {"Context.OpenFile", Field, 0, ""}, + {"Context.ReadDir", Field, 0, ""}, + {"Context.ReleaseTags", Field, 1, ""}, + {"Context.SplitPathList", Field, 0, ""}, + {"Context.ToolTags", Field, 17, ""}, + {"Context.UseAllFiles", Field, 0, ""}, + {"Default", Var, 0, ""}, + {"Directive", Type, 21, ""}, + {"Directive.Pos", Field, 21, ""}, + {"Directive.Text", Field, 21, ""}, + {"FindOnly", Const, 0, ""}, + {"IgnoreVendor", Const, 6, ""}, + {"Import", Func, 0, "func(path string, srcDir string, mode ImportMode) (*Package, error)"}, + {"ImportComment", Const, 4, ""}, + {"ImportDir", Func, 0, "func(dir string, mode ImportMode) (*Package, error)"}, + {"ImportMode", Type, 0, ""}, + {"IsLocalImport", Func, 0, "func(path string) bool"}, + {"MultiplePackageError", Type, 4, ""}, + {"MultiplePackageError.Dir", Field, 4, ""}, + {"MultiplePackageError.Files", Field, 4, ""}, + {"MultiplePackageError.Packages", Field, 4, ""}, + {"NoGoError", Type, 0, ""}, + {"NoGoError.Dir", Field, 0, ""}, + {"Package", Type, 0, ""}, + {"Package.AllTags", Field, 2, ""}, + {"Package.BinDir", Field, 0, ""}, + {"Package.BinaryOnly", Field, 7, ""}, + {"Package.CFiles", Field, 0, ""}, + {"Package.CXXFiles", Field, 2, ""}, + {"Package.CgoCFLAGS", Field, 0, ""}, + {"Package.CgoCPPFLAGS", Field, 2, ""}, + {"Package.CgoCXXFLAGS", Field, 2, ""}, + {"Package.CgoFFLAGS", Field, 7, ""}, + {"Package.CgoFiles", Field, 0, ""}, + {"Package.CgoLDFLAGS", Field, 0, ""}, + {"Package.CgoPkgConfig", Field, 0, ""}, + {"Package.ConflictDir", Field, 2, ""}, + {"Package.Dir", Field, 0, ""}, + {"Package.Directives", Field, 21, ""}, + {"Package.Doc", Field, 0, ""}, + {"Package.EmbedPatternPos", Field, 16, ""}, + {"Package.EmbedPatterns", Field, 16, ""}, + {"Package.FFiles", Field, 7, ""}, + {"Package.GoFiles", Field, 0, ""}, + {"Package.Goroot", Field, 0, ""}, + {"Package.HFiles", Field, 0, ""}, + {"Package.IgnoredGoFiles", Field, 1, ""}, + {"Package.IgnoredOtherFiles", Field, 16, ""}, + {"Package.ImportComment", Field, 4, ""}, + {"Package.ImportPath", Field, 0, ""}, + {"Package.ImportPos", Field, 0, ""}, + 
{"Package.Imports", Field, 0, ""}, + {"Package.InvalidGoFiles", Field, 6, ""}, + {"Package.MFiles", Field, 3, ""}, + {"Package.Name", Field, 0, ""}, + {"Package.PkgObj", Field, 0, ""}, + {"Package.PkgRoot", Field, 0, ""}, + {"Package.PkgTargetRoot", Field, 5, ""}, + {"Package.Root", Field, 0, ""}, + {"Package.SFiles", Field, 0, ""}, + {"Package.SrcRoot", Field, 0, ""}, + {"Package.SwigCXXFiles", Field, 1, ""}, + {"Package.SwigFiles", Field, 1, ""}, + {"Package.SysoFiles", Field, 0, ""}, + {"Package.TestDirectives", Field, 21, ""}, + {"Package.TestEmbedPatternPos", Field, 16, ""}, + {"Package.TestEmbedPatterns", Field, 16, ""}, + {"Package.TestGoFiles", Field, 0, ""}, + {"Package.TestImportPos", Field, 0, ""}, + {"Package.TestImports", Field, 0, ""}, + {"Package.XTestDirectives", Field, 21, ""}, + {"Package.XTestEmbedPatternPos", Field, 16, ""}, + {"Package.XTestEmbedPatterns", Field, 16, ""}, + {"Package.XTestGoFiles", Field, 0, ""}, + {"Package.XTestImportPos", Field, 0, ""}, + {"Package.XTestImports", Field, 0, ""}, + {"ToolDir", Var, 0, ""}, + }, + "go/build/constraint": { + {"(*AndExpr).Eval", Method, 16, ""}, + {"(*AndExpr).String", Method, 16, ""}, + {"(*NotExpr).Eval", Method, 16, ""}, + {"(*NotExpr).String", Method, 16, ""}, + {"(*OrExpr).Eval", Method, 16, ""}, + {"(*OrExpr).String", Method, 16, ""}, + {"(*SyntaxError).Error", Method, 16, ""}, + {"(*TagExpr).Eval", Method, 16, ""}, + {"(*TagExpr).String", Method, 16, ""}, + {"AndExpr", Type, 16, ""}, + {"AndExpr.X", Field, 16, ""}, + {"AndExpr.Y", Field, 16, ""}, + {"Expr", Type, 16, ""}, + {"GoVersion", Func, 21, "func(x Expr) string"}, + {"IsGoBuild", Func, 16, "func(line string) bool"}, + {"IsPlusBuild", Func, 16, "func(line string) bool"}, + {"NotExpr", Type, 16, ""}, + {"NotExpr.X", Field, 16, ""}, + {"OrExpr", Type, 16, ""}, + {"OrExpr.X", Field, 16, ""}, + {"OrExpr.Y", Field, 16, ""}, + {"Parse", Func, 16, "func(line string) (Expr, error)"}, + {"PlusBuildLines", Func, 16, "func(x Expr) ([]string, error)"}, + {"SyntaxError", Type, 16, ""}, + {"SyntaxError.Err", Field, 16, ""}, + {"SyntaxError.Offset", Field, 16, ""}, + {"TagExpr", Type, 16, ""}, + {"TagExpr.Tag", Field, 16, ""}, + }, + "go/constant": { + {"(Kind).String", Method, 18, ""}, + {"BinaryOp", Func, 5, "func(x_ Value, op token.Token, y_ Value) Value"}, + {"BitLen", Func, 5, "func(x Value) int"}, + {"Bool", Const, 5, ""}, + {"BoolVal", Func, 5, "func(x Value) bool"}, + {"Bytes", Func, 5, "func(x Value) []byte"}, + {"Compare", Func, 5, "func(x_ Value, op token.Token, y_ Value) bool"}, + {"Complex", Const, 5, ""}, + {"Denom", Func, 5, "func(x Value) Value"}, + {"Float", Const, 5, ""}, + {"Float32Val", Func, 5, "func(x Value) (float32, bool)"}, + {"Float64Val", Func, 5, "func(x Value) (float64, bool)"}, + {"Imag", Func, 5, "func(x Value) Value"}, + {"Int", Const, 5, ""}, + {"Int64Val", Func, 5, "func(x Value) (int64, bool)"}, + {"Kind", Type, 5, ""}, + {"Make", Func, 13, "func(x any) Value"}, + {"MakeBool", Func, 5, "func(b bool) Value"}, + {"MakeFloat64", Func, 5, "func(x float64) Value"}, + {"MakeFromBytes", Func, 5, "func(bytes []byte) Value"}, + {"MakeFromLiteral", Func, 5, "func(lit string, tok token.Token, zero uint) Value"}, + {"MakeImag", Func, 5, "func(x Value) Value"}, + {"MakeInt64", Func, 5, "func(x int64) Value"}, + {"MakeString", Func, 5, "func(s string) Value"}, + {"MakeUint64", Func, 5, "func(x uint64) Value"}, + {"MakeUnknown", Func, 5, "func() Value"}, + {"Num", Func, 5, "func(x Value) Value"}, + {"Real", Func, 5, "func(x Value) Value"}, + {"Shift", 
Func, 5, "func(x Value, op token.Token, s uint) Value"}, + {"Sign", Func, 5, "func(x Value) int"}, + {"String", Const, 5, ""}, + {"StringVal", Func, 5, "func(x Value) string"}, + {"ToComplex", Func, 6, "func(x Value) Value"}, + {"ToFloat", Func, 6, "func(x Value) Value"}, + {"ToInt", Func, 6, "func(x Value) Value"}, + {"Uint64Val", Func, 5, "func(x Value) (uint64, bool)"}, + {"UnaryOp", Func, 5, "func(op token.Token, y Value, prec uint) Value"}, + {"Unknown", Const, 5, ""}, + {"Val", Func, 13, "func(x Value) any"}, + {"Value", Type, 5, ""}, + }, + "go/doc": { + {"(*Package).Filter", Method, 0, ""}, + {"(*Package).HTML", Method, 19, ""}, + {"(*Package).Markdown", Method, 19, ""}, + {"(*Package).Parser", Method, 19, ""}, + {"(*Package).Printer", Method, 19, ""}, + {"(*Package).Synopsis", Method, 19, ""}, + {"(*Package).Text", Method, 19, ""}, + {"AllDecls", Const, 0, ""}, + {"AllMethods", Const, 0, ""}, + {"Example", Type, 0, ""}, + {"Example.Code", Field, 0, ""}, + {"Example.Comments", Field, 0, ""}, + {"Example.Doc", Field, 0, ""}, + {"Example.EmptyOutput", Field, 1, ""}, + {"Example.Name", Field, 0, ""}, + {"Example.Order", Field, 1, ""}, + {"Example.Output", Field, 0, ""}, + {"Example.Play", Field, 1, ""}, + {"Example.Suffix", Field, 14, ""}, + {"Example.Unordered", Field, 7, ""}, + {"Examples", Func, 0, "func(testFiles ...*ast.File) []*Example"}, + {"Filter", Type, 0, ""}, + {"Func", Type, 0, ""}, + {"Func.Decl", Field, 0, ""}, + {"Func.Doc", Field, 0, ""}, + {"Func.Examples", Field, 14, ""}, + {"Func.Level", Field, 0, ""}, + {"Func.Name", Field, 0, ""}, + {"Func.Orig", Field, 0, ""}, + {"Func.Recv", Field, 0, ""}, + {"IllegalPrefixes", Var, 1, ""}, + {"IsPredeclared", Func, 8, "func(s string) bool"}, + {"Mode", Type, 0, ""}, + {"New", Func, 0, "func(pkg *ast.Package, importPath string, mode Mode) *Package"}, + {"NewFromFiles", Func, 14, "func(fset *token.FileSet, files []*ast.File, importPath string, opts ...any) (*Package, error)"}, + {"Note", Type, 1, ""}, + {"Note.Body", Field, 1, ""}, + {"Note.End", Field, 1, ""}, + {"Note.Pos", Field, 1, ""}, + {"Note.UID", Field, 1, ""}, + {"Package", Type, 0, ""}, + {"Package.Bugs", Field, 0, ""}, + {"Package.Consts", Field, 0, ""}, + {"Package.Doc", Field, 0, ""}, + {"Package.Examples", Field, 14, ""}, + {"Package.Filenames", Field, 0, ""}, + {"Package.Funcs", Field, 0, ""}, + {"Package.ImportPath", Field, 0, ""}, + {"Package.Imports", Field, 0, ""}, + {"Package.Name", Field, 0, ""}, + {"Package.Notes", Field, 1, ""}, + {"Package.Types", Field, 0, ""}, + {"Package.Vars", Field, 0, ""}, + {"PreserveAST", Const, 12, ""}, + {"Synopsis", Func, 0, "func(text string) string"}, + {"ToHTML", Func, 0, "func(w io.Writer, text string, words map[string]string)"}, + {"ToText", Func, 0, "func(w io.Writer, text string, prefix string, codePrefix string, width int)"}, + {"Type", Type, 0, ""}, + {"Type.Consts", Field, 0, ""}, + {"Type.Decl", Field, 0, ""}, + {"Type.Doc", Field, 0, ""}, + {"Type.Examples", Field, 14, ""}, + {"Type.Funcs", Field, 0, ""}, + {"Type.Methods", Field, 0, ""}, + {"Type.Name", Field, 0, ""}, + {"Type.Vars", Field, 0, ""}, + {"Value", Type, 0, ""}, + {"Value.Decl", Field, 0, ""}, + {"Value.Doc", Field, 0, ""}, + {"Value.Names", Field, 0, ""}, + }, + "go/doc/comment": { + {"(*DocLink).DefaultURL", Method, 19, ""}, + {"(*Heading).DefaultID", Method, 19, ""}, + {"(*List).BlankBefore", Method, 19, ""}, + {"(*List).BlankBetween", Method, 19, ""}, + {"(*Parser).Parse", Method, 19, ""}, + {"(*Printer).Comment", Method, 19, ""}, + 
{"(*Printer).HTML", Method, 19, ""}, + {"(*Printer).Markdown", Method, 19, ""}, + {"(*Printer).Text", Method, 19, ""}, + {"Block", Type, 19, ""}, + {"Code", Type, 19, ""}, + {"Code.Text", Field, 19, ""}, + {"DefaultLookupPackage", Func, 19, "func(name string) (importPath string, ok bool)"}, + {"Doc", Type, 19, ""}, + {"Doc.Content", Field, 19, ""}, + {"Doc.Links", Field, 19, ""}, + {"DocLink", Type, 19, ""}, + {"DocLink.ImportPath", Field, 19, ""}, + {"DocLink.Name", Field, 19, ""}, + {"DocLink.Recv", Field, 19, ""}, + {"DocLink.Text", Field, 19, ""}, + {"Heading", Type, 19, ""}, + {"Heading.Text", Field, 19, ""}, + {"Italic", Type, 19, ""}, + {"Link", Type, 19, ""}, + {"Link.Auto", Field, 19, ""}, + {"Link.Text", Field, 19, ""}, + {"Link.URL", Field, 19, ""}, + {"LinkDef", Type, 19, ""}, + {"LinkDef.Text", Field, 19, ""}, + {"LinkDef.URL", Field, 19, ""}, + {"LinkDef.Used", Field, 19, ""}, + {"List", Type, 19, ""}, + {"List.ForceBlankBefore", Field, 19, ""}, + {"List.ForceBlankBetween", Field, 19, ""}, + {"List.Items", Field, 19, ""}, + {"ListItem", Type, 19, ""}, + {"ListItem.Content", Field, 19, ""}, + {"ListItem.Number", Field, 19, ""}, + {"Paragraph", Type, 19, ""}, + {"Paragraph.Text", Field, 19, ""}, + {"Parser", Type, 19, ""}, + {"Parser.LookupPackage", Field, 19, ""}, + {"Parser.LookupSym", Field, 19, ""}, + {"Parser.Words", Field, 19, ""}, + {"Plain", Type, 19, ""}, + {"Printer", Type, 19, ""}, + {"Printer.DocLinkBaseURL", Field, 19, ""}, + {"Printer.DocLinkURL", Field, 19, ""}, + {"Printer.HeadingID", Field, 19, ""}, + {"Printer.HeadingLevel", Field, 19, ""}, + {"Printer.TextCodePrefix", Field, 19, ""}, + {"Printer.TextPrefix", Field, 19, ""}, + {"Printer.TextWidth", Field, 19, ""}, + {"Text", Type, 19, ""}, + }, + "go/format": { + {"Node", Func, 1, "func(dst io.Writer, fset *token.FileSet, node any) error"}, + {"Source", Func, 1, "func(src []byte) ([]byte, error)"}, + }, + "go/importer": { + {"Default", Func, 5, "func() types.Importer"}, + {"For", Func, 5, "func(compiler string, lookup Lookup) types.Importer"}, + {"ForCompiler", Func, 12, "func(fset *token.FileSet, compiler string, lookup Lookup) types.Importer"}, + {"Lookup", Type, 5, ""}, + }, + "go/parser": { + {"AllErrors", Const, 1, ""}, + {"DeclarationErrors", Const, 0, ""}, + {"ImportsOnly", Const, 0, ""}, + {"Mode", Type, 0, ""}, + {"PackageClauseOnly", Const, 0, ""}, + {"ParseComments", Const, 0, ""}, + {"ParseDir", Func, 0, "func(fset *token.FileSet, path string, filter func(fs.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error)"}, + {"ParseExpr", Func, 0, "func(x string) (ast.Expr, error)"}, + {"ParseExprFrom", Func, 5, "func(fset *token.FileSet, filename string, src any, mode Mode) (expr ast.Expr, err error)"}, + {"ParseFile", Func, 0, "func(fset *token.FileSet, filename string, src any, mode Mode) (f *ast.File, err error)"}, + {"SkipObjectResolution", Const, 17, ""}, + {"SpuriousErrors", Const, 0, ""}, + {"Trace", Const, 0, ""}, + }, + "go/printer": { + {"(*Config).Fprint", Method, 0, ""}, + {"CommentedNode", Type, 0, ""}, + {"CommentedNode.Comments", Field, 0, ""}, + {"CommentedNode.Node", Field, 0, ""}, + {"Config", Type, 0, ""}, + {"Config.Indent", Field, 1, ""}, + {"Config.Mode", Field, 0, ""}, + {"Config.Tabwidth", Field, 0, ""}, + {"Fprint", Func, 0, "func(output io.Writer, fset *token.FileSet, node any) error"}, + {"Mode", Type, 0, ""}, + {"RawFormat", Const, 0, ""}, + {"SourcePos", Const, 0, ""}, + {"TabIndent", Const, 0, ""}, + {"UseSpaces", Const, 0, ""}, + }, + "go/scanner": { + 
{"(*ErrorList).Add", Method, 0, ""}, + {"(*ErrorList).RemoveMultiples", Method, 0, ""}, + {"(*ErrorList).Reset", Method, 0, ""}, + {"(*Scanner).Init", Method, 0, ""}, + {"(*Scanner).Scan", Method, 0, ""}, + {"(Error).Error", Method, 0, ""}, + {"(ErrorList).Err", Method, 0, ""}, + {"(ErrorList).Error", Method, 0, ""}, + {"(ErrorList).Len", Method, 0, ""}, + {"(ErrorList).Less", Method, 0, ""}, + {"(ErrorList).Sort", Method, 0, ""}, + {"(ErrorList).Swap", Method, 0, ""}, + {"Error", Type, 0, ""}, + {"Error.Msg", Field, 0, ""}, + {"Error.Pos", Field, 0, ""}, + {"ErrorHandler", Type, 0, ""}, + {"ErrorList", Type, 0, ""}, + {"Mode", Type, 0, ""}, + {"PrintError", Func, 0, "func(w io.Writer, err error)"}, + {"ScanComments", Const, 0, ""}, + {"Scanner", Type, 0, ""}, + {"Scanner.ErrorCount", Field, 0, ""}, + }, + "go/token": { + {"(*File).AddLine", Method, 0, ""}, + {"(*File).AddLineColumnInfo", Method, 11, ""}, + {"(*File).AddLineInfo", Method, 0, ""}, + {"(*File).Base", Method, 0, ""}, + {"(*File).Line", Method, 0, ""}, + {"(*File).LineCount", Method, 0, ""}, + {"(*File).LineStart", Method, 12, ""}, + {"(*File).Lines", Method, 21, ""}, + {"(*File).MergeLine", Method, 2, ""}, + {"(*File).Name", Method, 0, ""}, + {"(*File).Offset", Method, 0, ""}, + {"(*File).Pos", Method, 0, ""}, + {"(*File).Position", Method, 0, ""}, + {"(*File).PositionFor", Method, 4, ""}, + {"(*File).SetLines", Method, 0, ""}, + {"(*File).SetLinesForContent", Method, 0, ""}, + {"(*File).Size", Method, 0, ""}, + {"(*FileSet).AddFile", Method, 0, ""}, + {"(*FileSet).Base", Method, 0, ""}, + {"(*FileSet).File", Method, 0, ""}, + {"(*FileSet).Iterate", Method, 0, ""}, + {"(*FileSet).Position", Method, 0, ""}, + {"(*FileSet).PositionFor", Method, 4, ""}, + {"(*FileSet).Read", Method, 0, ""}, + {"(*FileSet).RemoveFile", Method, 20, ""}, + {"(*FileSet).Write", Method, 0, ""}, + {"(*Position).IsValid", Method, 0, ""}, + {"(Pos).IsValid", Method, 0, ""}, + {"(Position).String", Method, 0, ""}, + {"(Token).IsKeyword", Method, 0, ""}, + {"(Token).IsLiteral", Method, 0, ""}, + {"(Token).IsOperator", Method, 0, ""}, + {"(Token).Precedence", Method, 0, ""}, + {"(Token).String", Method, 0, ""}, + {"ADD", Const, 0, ""}, + {"ADD_ASSIGN", Const, 0, ""}, + {"AND", Const, 0, ""}, + {"AND_ASSIGN", Const, 0, ""}, + {"AND_NOT", Const, 0, ""}, + {"AND_NOT_ASSIGN", Const, 0, ""}, + {"ARROW", Const, 0, ""}, + {"ASSIGN", Const, 0, ""}, + {"BREAK", Const, 0, ""}, + {"CASE", Const, 0, ""}, + {"CHAN", Const, 0, ""}, + {"CHAR", Const, 0, ""}, + {"COLON", Const, 0, ""}, + {"COMMA", Const, 0, ""}, + {"COMMENT", Const, 0, ""}, + {"CONST", Const, 0, ""}, + {"CONTINUE", Const, 0, ""}, + {"DEC", Const, 0, ""}, + {"DEFAULT", Const, 0, ""}, + {"DEFER", Const, 0, ""}, + {"DEFINE", Const, 0, ""}, + {"ELLIPSIS", Const, 0, ""}, + {"ELSE", Const, 0, ""}, + {"EOF", Const, 0, ""}, + {"EQL", Const, 0, ""}, + {"FALLTHROUGH", Const, 0, ""}, + {"FLOAT", Const, 0, ""}, + {"FOR", Const, 0, ""}, + {"FUNC", Const, 0, ""}, + {"File", Type, 0, ""}, + {"FileSet", Type, 0, ""}, + {"GEQ", Const, 0, ""}, + {"GO", Const, 0, ""}, + {"GOTO", Const, 0, ""}, + {"GTR", Const, 0, ""}, + {"HighestPrec", Const, 0, ""}, + {"IDENT", Const, 0, ""}, + {"IF", Const, 0, ""}, + {"ILLEGAL", Const, 0, ""}, + {"IMAG", Const, 0, ""}, + {"IMPORT", Const, 0, ""}, + {"INC", Const, 0, ""}, + {"INT", Const, 0, ""}, + {"INTERFACE", Const, 0, ""}, + {"IsExported", Func, 13, "func(name string) bool"}, + {"IsIdentifier", Func, 13, "func(name string) bool"}, + {"IsKeyword", Func, 13, "func(name string) bool"}, + 
{"LAND", Const, 0, ""}, + {"LBRACE", Const, 0, ""}, + {"LBRACK", Const, 0, ""}, + {"LEQ", Const, 0, ""}, + {"LOR", Const, 0, ""}, + {"LPAREN", Const, 0, ""}, + {"LSS", Const, 0, ""}, + {"Lookup", Func, 0, "func(ident string) Token"}, + {"LowestPrec", Const, 0, ""}, + {"MAP", Const, 0, ""}, + {"MUL", Const, 0, ""}, + {"MUL_ASSIGN", Const, 0, ""}, + {"NEQ", Const, 0, ""}, + {"NOT", Const, 0, ""}, + {"NewFileSet", Func, 0, "func() *FileSet"}, + {"NoPos", Const, 0, ""}, + {"OR", Const, 0, ""}, + {"OR_ASSIGN", Const, 0, ""}, + {"PACKAGE", Const, 0, ""}, + {"PERIOD", Const, 0, ""}, + {"Pos", Type, 0, ""}, + {"Position", Type, 0, ""}, + {"Position.Column", Field, 0, ""}, + {"Position.Filename", Field, 0, ""}, + {"Position.Line", Field, 0, ""}, + {"Position.Offset", Field, 0, ""}, + {"QUO", Const, 0, ""}, + {"QUO_ASSIGN", Const, 0, ""}, + {"RANGE", Const, 0, ""}, + {"RBRACE", Const, 0, ""}, + {"RBRACK", Const, 0, ""}, + {"REM", Const, 0, ""}, + {"REM_ASSIGN", Const, 0, ""}, + {"RETURN", Const, 0, ""}, + {"RPAREN", Const, 0, ""}, + {"SELECT", Const, 0, ""}, + {"SEMICOLON", Const, 0, ""}, + {"SHL", Const, 0, ""}, + {"SHL_ASSIGN", Const, 0, ""}, + {"SHR", Const, 0, ""}, + {"SHR_ASSIGN", Const, 0, ""}, + {"STRING", Const, 0, ""}, + {"STRUCT", Const, 0, ""}, + {"SUB", Const, 0, ""}, + {"SUB_ASSIGN", Const, 0, ""}, + {"SWITCH", Const, 0, ""}, + {"TILDE", Const, 18, ""}, + {"TYPE", Const, 0, ""}, + {"Token", Type, 0, ""}, + {"UnaryPrec", Const, 0, ""}, + {"VAR", Const, 0, ""}, + {"XOR", Const, 0, ""}, + {"XOR_ASSIGN", Const, 0, ""}, + }, + "go/types": { + {"(*Alias).Obj", Method, 22, ""}, + {"(*Alias).Origin", Method, 23, ""}, + {"(*Alias).Rhs", Method, 23, ""}, + {"(*Alias).SetTypeParams", Method, 23, ""}, + {"(*Alias).String", Method, 22, ""}, + {"(*Alias).TypeArgs", Method, 23, ""}, + {"(*Alias).TypeParams", Method, 23, ""}, + {"(*Alias).Underlying", Method, 22, ""}, + {"(*ArgumentError).Error", Method, 18, ""}, + {"(*ArgumentError).Unwrap", Method, 18, ""}, + {"(*Array).Elem", Method, 5, ""}, + {"(*Array).Len", Method, 5, ""}, + {"(*Array).String", Method, 5, ""}, + {"(*Array).Underlying", Method, 5, ""}, + {"(*Basic).Info", Method, 5, ""}, + {"(*Basic).Kind", Method, 5, ""}, + {"(*Basic).Name", Method, 5, ""}, + {"(*Basic).String", Method, 5, ""}, + {"(*Basic).Underlying", Method, 5, ""}, + {"(*Builtin).Exported", Method, 5, ""}, + {"(*Builtin).Id", Method, 5, ""}, + {"(*Builtin).Name", Method, 5, ""}, + {"(*Builtin).Parent", Method, 5, ""}, + {"(*Builtin).Pkg", Method, 5, ""}, + {"(*Builtin).Pos", Method, 5, ""}, + {"(*Builtin).String", Method, 5, ""}, + {"(*Builtin).Type", Method, 5, ""}, + {"(*Chan).Dir", Method, 5, ""}, + {"(*Chan).Elem", Method, 5, ""}, + {"(*Chan).String", Method, 5, ""}, + {"(*Chan).Underlying", Method, 5, ""}, + {"(*Checker).Files", Method, 5, ""}, + {"(*Config).Check", Method, 5, ""}, + {"(*Const).Exported", Method, 5, ""}, + {"(*Const).Id", Method, 5, ""}, + {"(*Const).Name", Method, 5, ""}, + {"(*Const).Parent", Method, 5, ""}, + {"(*Const).Pkg", Method, 5, ""}, + {"(*Const).Pos", Method, 5, ""}, + {"(*Const).String", Method, 5, ""}, + {"(*Const).Type", Method, 5, ""}, + {"(*Const).Val", Method, 5, ""}, + {"(*Func).Exported", Method, 5, ""}, + {"(*Func).FullName", Method, 5, ""}, + {"(*Func).Id", Method, 5, ""}, + {"(*Func).Name", Method, 5, ""}, + {"(*Func).Origin", Method, 19, ""}, + {"(*Func).Parent", Method, 5, ""}, + {"(*Func).Pkg", Method, 5, ""}, + {"(*Func).Pos", Method, 5, ""}, + {"(*Func).Scope", Method, 5, ""}, + {"(*Func).Signature", Method, 23, ""}, + 
{"(*Func).String", Method, 5, ""}, + {"(*Func).Type", Method, 5, ""}, + {"(*Info).ObjectOf", Method, 5, ""}, + {"(*Info).PkgNameOf", Method, 22, ""}, + {"(*Info).TypeOf", Method, 5, ""}, + {"(*Initializer).String", Method, 5, ""}, + {"(*Interface).Complete", Method, 5, ""}, + {"(*Interface).Embedded", Method, 5, ""}, + {"(*Interface).EmbeddedType", Method, 11, ""}, + {"(*Interface).EmbeddedTypes", Method, 24, ""}, + {"(*Interface).Empty", Method, 5, ""}, + {"(*Interface).ExplicitMethod", Method, 5, ""}, + {"(*Interface).ExplicitMethods", Method, 24, ""}, + {"(*Interface).IsComparable", Method, 18, ""}, + {"(*Interface).IsImplicit", Method, 18, ""}, + {"(*Interface).IsMethodSet", Method, 18, ""}, + {"(*Interface).MarkImplicit", Method, 18, ""}, + {"(*Interface).Method", Method, 5, ""}, + {"(*Interface).Methods", Method, 24, ""}, + {"(*Interface).NumEmbeddeds", Method, 5, ""}, + {"(*Interface).NumExplicitMethods", Method, 5, ""}, + {"(*Interface).NumMethods", Method, 5, ""}, + {"(*Interface).String", Method, 5, ""}, + {"(*Interface).Underlying", Method, 5, ""}, + {"(*Label).Exported", Method, 5, ""}, + {"(*Label).Id", Method, 5, ""}, + {"(*Label).Name", Method, 5, ""}, + {"(*Label).Parent", Method, 5, ""}, + {"(*Label).Pkg", Method, 5, ""}, + {"(*Label).Pos", Method, 5, ""}, + {"(*Label).String", Method, 5, ""}, + {"(*Label).Type", Method, 5, ""}, + {"(*Map).Elem", Method, 5, ""}, + {"(*Map).Key", Method, 5, ""}, + {"(*Map).String", Method, 5, ""}, + {"(*Map).Underlying", Method, 5, ""}, + {"(*MethodSet).At", Method, 5, ""}, + {"(*MethodSet).Len", Method, 5, ""}, + {"(*MethodSet).Lookup", Method, 5, ""}, + {"(*MethodSet).Methods", Method, 24, ""}, + {"(*MethodSet).String", Method, 5, ""}, + {"(*Named).AddMethod", Method, 5, ""}, + {"(*Named).Method", Method, 5, ""}, + {"(*Named).Methods", Method, 24, ""}, + {"(*Named).NumMethods", Method, 5, ""}, + {"(*Named).Obj", Method, 5, ""}, + {"(*Named).Origin", Method, 18, ""}, + {"(*Named).SetTypeParams", Method, 18, ""}, + {"(*Named).SetUnderlying", Method, 5, ""}, + {"(*Named).String", Method, 5, ""}, + {"(*Named).TypeArgs", Method, 18, ""}, + {"(*Named).TypeParams", Method, 18, ""}, + {"(*Named).Underlying", Method, 5, ""}, + {"(*Nil).Exported", Method, 5, ""}, + {"(*Nil).Id", Method, 5, ""}, + {"(*Nil).Name", Method, 5, ""}, + {"(*Nil).Parent", Method, 5, ""}, + {"(*Nil).Pkg", Method, 5, ""}, + {"(*Nil).Pos", Method, 5, ""}, + {"(*Nil).String", Method, 5, ""}, + {"(*Nil).Type", Method, 5, ""}, + {"(*Package).Complete", Method, 5, ""}, + {"(*Package).GoVersion", Method, 21, ""}, + {"(*Package).Imports", Method, 5, ""}, + {"(*Package).MarkComplete", Method, 5, ""}, + {"(*Package).Name", Method, 5, ""}, + {"(*Package).Path", Method, 5, ""}, + {"(*Package).Scope", Method, 5, ""}, + {"(*Package).SetImports", Method, 5, ""}, + {"(*Package).SetName", Method, 6, ""}, + {"(*Package).String", Method, 5, ""}, + {"(*PkgName).Exported", Method, 5, ""}, + {"(*PkgName).Id", Method, 5, ""}, + {"(*PkgName).Imported", Method, 5, ""}, + {"(*PkgName).Name", Method, 5, ""}, + {"(*PkgName).Parent", Method, 5, ""}, + {"(*PkgName).Pkg", Method, 5, ""}, + {"(*PkgName).Pos", Method, 5, ""}, + {"(*PkgName).String", Method, 5, ""}, + {"(*PkgName).Type", Method, 5, ""}, + {"(*Pointer).Elem", Method, 5, ""}, + {"(*Pointer).String", Method, 5, ""}, + {"(*Pointer).Underlying", Method, 5, ""}, + {"(*Scope).Child", Method, 5, ""}, + {"(*Scope).Children", Method, 24, ""}, + {"(*Scope).Contains", Method, 5, ""}, + {"(*Scope).End", Method, 5, ""}, + {"(*Scope).Innermost", Method, 
5, ""}, + {"(*Scope).Insert", Method, 5, ""}, + {"(*Scope).Len", Method, 5, ""}, + {"(*Scope).Lookup", Method, 5, ""}, + {"(*Scope).LookupParent", Method, 5, ""}, + {"(*Scope).Names", Method, 5, ""}, + {"(*Scope).NumChildren", Method, 5, ""}, + {"(*Scope).Parent", Method, 5, ""}, + {"(*Scope).Pos", Method, 5, ""}, + {"(*Scope).String", Method, 5, ""}, + {"(*Scope).WriteTo", Method, 5, ""}, + {"(*Selection).Index", Method, 5, ""}, + {"(*Selection).Indirect", Method, 5, ""}, + {"(*Selection).Kind", Method, 5, ""}, + {"(*Selection).Obj", Method, 5, ""}, + {"(*Selection).Recv", Method, 5, ""}, + {"(*Selection).String", Method, 5, ""}, + {"(*Selection).Type", Method, 5, ""}, + {"(*Signature).Params", Method, 5, ""}, + {"(*Signature).Recv", Method, 5, ""}, + {"(*Signature).RecvTypeParams", Method, 18, ""}, + {"(*Signature).Results", Method, 5, ""}, + {"(*Signature).String", Method, 5, ""}, + {"(*Signature).TypeParams", Method, 18, ""}, + {"(*Signature).Underlying", Method, 5, ""}, + {"(*Signature).Variadic", Method, 5, ""}, + {"(*Slice).Elem", Method, 5, ""}, + {"(*Slice).String", Method, 5, ""}, + {"(*Slice).Underlying", Method, 5, ""}, + {"(*StdSizes).Alignof", Method, 5, ""}, + {"(*StdSizes).Offsetsof", Method, 5, ""}, + {"(*StdSizes).Sizeof", Method, 5, ""}, + {"(*Struct).Field", Method, 5, ""}, + {"(*Struct).Fields", Method, 24, ""}, + {"(*Struct).NumFields", Method, 5, ""}, + {"(*Struct).String", Method, 5, ""}, + {"(*Struct).Tag", Method, 5, ""}, + {"(*Struct).Underlying", Method, 5, ""}, + {"(*Term).String", Method, 18, ""}, + {"(*Term).Tilde", Method, 18, ""}, + {"(*Term).Type", Method, 18, ""}, + {"(*Tuple).At", Method, 5, ""}, + {"(*Tuple).Len", Method, 5, ""}, + {"(*Tuple).String", Method, 5, ""}, + {"(*Tuple).Underlying", Method, 5, ""}, + {"(*Tuple).Variables", Method, 24, ""}, + {"(*TypeList).At", Method, 18, ""}, + {"(*TypeList).Len", Method, 18, ""}, + {"(*TypeList).Types", Method, 24, ""}, + {"(*TypeName).Exported", Method, 5, ""}, + {"(*TypeName).Id", Method, 5, ""}, + {"(*TypeName).IsAlias", Method, 9, ""}, + {"(*TypeName).Name", Method, 5, ""}, + {"(*TypeName).Parent", Method, 5, ""}, + {"(*TypeName).Pkg", Method, 5, ""}, + {"(*TypeName).Pos", Method, 5, ""}, + {"(*TypeName).String", Method, 5, ""}, + {"(*TypeName).Type", Method, 5, ""}, + {"(*TypeParam).Constraint", Method, 18, ""}, + {"(*TypeParam).Index", Method, 18, ""}, + {"(*TypeParam).Obj", Method, 18, ""}, + {"(*TypeParam).SetConstraint", Method, 18, ""}, + {"(*TypeParam).String", Method, 18, ""}, + {"(*TypeParam).Underlying", Method, 18, ""}, + {"(*TypeParamList).At", Method, 18, ""}, + {"(*TypeParamList).Len", Method, 18, ""}, + {"(*TypeParamList).TypeParams", Method, 24, ""}, + {"(*Union).Len", Method, 18, ""}, + {"(*Union).String", Method, 18, ""}, + {"(*Union).Term", Method, 18, ""}, + {"(*Union).Terms", Method, 24, ""}, + {"(*Union).Underlying", Method, 18, ""}, + {"(*Var).Anonymous", Method, 5, ""}, + {"(*Var).Embedded", Method, 11, ""}, + {"(*Var).Exported", Method, 5, ""}, + {"(*Var).Id", Method, 5, ""}, + {"(*Var).IsField", Method, 5, ""}, + {"(*Var).Kind", Method, 25, ""}, + {"(*Var).Name", Method, 5, ""}, + {"(*Var).Origin", Method, 19, ""}, + {"(*Var).Parent", Method, 5, ""}, + {"(*Var).Pkg", Method, 5, ""}, + {"(*Var).Pos", Method, 5, ""}, + {"(*Var).SetKind", Method, 25, ""}, + {"(*Var).String", Method, 5, ""}, + {"(*Var).Type", Method, 5, ""}, + {"(Checker).ObjectOf", Method, 5, ""}, + {"(Checker).PkgNameOf", Method, 22, ""}, + {"(Checker).TypeOf", Method, 5, ""}, + {"(Error).Error", Method, 5, ""}, + 
{"(TypeAndValue).Addressable", Method, 5, ""}, + {"(TypeAndValue).Assignable", Method, 5, ""}, + {"(TypeAndValue).HasOk", Method, 5, ""}, + {"(TypeAndValue).IsBuiltin", Method, 5, ""}, + {"(TypeAndValue).IsNil", Method, 5, ""}, + {"(TypeAndValue).IsType", Method, 5, ""}, + {"(TypeAndValue).IsValue", Method, 5, ""}, + {"(TypeAndValue).IsVoid", Method, 5, ""}, + {"(VarKind).String", Method, 25, ""}, + {"Alias", Type, 22, ""}, + {"ArgumentError", Type, 18, ""}, + {"ArgumentError.Err", Field, 18, ""}, + {"ArgumentError.Index", Field, 18, ""}, + {"Array", Type, 5, ""}, + {"AssertableTo", Func, 5, "func(V *Interface, T Type) bool"}, + {"AssignableTo", Func, 5, "func(V Type, T Type) bool"}, + {"Basic", Type, 5, ""}, + {"BasicInfo", Type, 5, ""}, + {"BasicKind", Type, 5, ""}, + {"Bool", Const, 5, ""}, + {"Builtin", Type, 5, ""}, + {"Byte", Const, 5, ""}, + {"Chan", Type, 5, ""}, + {"ChanDir", Type, 5, ""}, + {"CheckExpr", Func, 13, "func(fset *token.FileSet, pkg *Package, pos token.Pos, expr ast.Expr, info *Info) (err error)"}, + {"Checker", Type, 5, ""}, + {"Checker.Info", Field, 5, ""}, + {"Comparable", Func, 5, "func(T Type) bool"}, + {"Complex128", Const, 5, ""}, + {"Complex64", Const, 5, ""}, + {"Config", Type, 5, ""}, + {"Config.Context", Field, 18, ""}, + {"Config.DisableUnusedImportCheck", Field, 5, ""}, + {"Config.Error", Field, 5, ""}, + {"Config.FakeImportC", Field, 5, ""}, + {"Config.GoVersion", Field, 18, ""}, + {"Config.IgnoreFuncBodies", Field, 5, ""}, + {"Config.Importer", Field, 5, ""}, + {"Config.Sizes", Field, 5, ""}, + {"Const", Type, 5, ""}, + {"Context", Type, 18, ""}, + {"ConvertibleTo", Func, 5, "func(V Type, T Type) bool"}, + {"DefPredeclaredTestFuncs", Func, 5, "func()"}, + {"Default", Func, 8, "func(t Type) Type"}, + {"Error", Type, 5, ""}, + {"Error.Fset", Field, 5, ""}, + {"Error.Msg", Field, 5, ""}, + {"Error.Pos", Field, 5, ""}, + {"Error.Soft", Field, 5, ""}, + {"Eval", Func, 5, "func(fset *token.FileSet, pkg *Package, pos token.Pos, expr string) (_ TypeAndValue, err error)"}, + {"ExprString", Func, 5, "func(x ast.Expr) string"}, + {"FieldVal", Const, 5, ""}, + {"FieldVar", Const, 25, ""}, + {"Float32", Const, 5, ""}, + {"Float64", Const, 5, ""}, + {"Func", Type, 5, ""}, + {"Id", Func, 5, "func(pkg *Package, name string) string"}, + {"Identical", Func, 5, "func(x Type, y Type) bool"}, + {"IdenticalIgnoreTags", Func, 8, "func(x Type, y Type) bool"}, + {"Implements", Func, 5, "func(V Type, T *Interface) bool"}, + {"ImportMode", Type, 6, ""}, + {"Importer", Type, 5, ""}, + {"ImporterFrom", Type, 6, ""}, + {"Info", Type, 5, ""}, + {"Info.Defs", Field, 5, ""}, + {"Info.FileVersions", Field, 22, ""}, + {"Info.Implicits", Field, 5, ""}, + {"Info.InitOrder", Field, 5, ""}, + {"Info.Instances", Field, 18, ""}, + {"Info.Scopes", Field, 5, ""}, + {"Info.Selections", Field, 5, ""}, + {"Info.Types", Field, 5, ""}, + {"Info.Uses", Field, 5, ""}, + {"Initializer", Type, 5, ""}, + {"Initializer.Lhs", Field, 5, ""}, + {"Initializer.Rhs", Field, 5, ""}, + {"Instance", Type, 18, ""}, + {"Instance.Type", Field, 18, ""}, + {"Instance.TypeArgs", Field, 18, ""}, + {"Instantiate", Func, 18, "func(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error)"}, + {"Int", Const, 5, ""}, + {"Int16", Const, 5, ""}, + {"Int32", Const, 5, ""}, + {"Int64", Const, 5, ""}, + {"Int8", Const, 5, ""}, + {"Interface", Type, 5, ""}, + {"Invalid", Const, 5, ""}, + {"IsBoolean", Const, 5, ""}, + {"IsComplex", Const, 5, ""}, + {"IsConstType", Const, 5, ""}, + {"IsFloat", Const, 5, ""}, + 
{"IsInteger", Const, 5, ""}, + {"IsInterface", Func, 5, "func(t Type) bool"}, + {"IsNumeric", Const, 5, ""}, + {"IsOrdered", Const, 5, ""}, + {"IsString", Const, 5, ""}, + {"IsUnsigned", Const, 5, ""}, + {"IsUntyped", Const, 5, ""}, + {"Label", Type, 5, ""}, + {"LocalVar", Const, 25, ""}, + {"LookupFieldOrMethod", Func, 5, "func(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool)"}, + {"LookupSelection", Func, 25, ""}, + {"Map", Type, 5, ""}, + {"MethodExpr", Const, 5, ""}, + {"MethodSet", Type, 5, ""}, + {"MethodVal", Const, 5, ""}, + {"MissingMethod", Func, 5, "func(V Type, T *Interface, static bool) (method *Func, wrongType bool)"}, + {"Named", Type, 5, ""}, + {"NewAlias", Func, 22, "func(obj *TypeName, rhs Type) *Alias"}, + {"NewArray", Func, 5, "func(elem Type, len int64) *Array"}, + {"NewChan", Func, 5, "func(dir ChanDir, elem Type) *Chan"}, + {"NewChecker", Func, 5, "func(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Checker"}, + {"NewConst", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type, val constant.Value) *Const"}, + {"NewContext", Func, 18, "func() *Context"}, + {"NewField", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type, embedded bool) *Var"}, + {"NewFunc", Func, 5, "func(pos token.Pos, pkg *Package, name string, sig *Signature) *Func"}, + {"NewInterface", Func, 5, "func(methods []*Func, embeddeds []*Named) *Interface"}, + {"NewInterfaceType", Func, 11, "func(methods []*Func, embeddeds []Type) *Interface"}, + {"NewLabel", Func, 5, "func(pos token.Pos, pkg *Package, name string) *Label"}, + {"NewMap", Func, 5, "func(key Type, elem Type) *Map"}, + {"NewMethodSet", Func, 5, "func(T Type) *MethodSet"}, + {"NewNamed", Func, 5, "func(obj *TypeName, underlying Type, methods []*Func) *Named"}, + {"NewPackage", Func, 5, "func(path string, name string) *Package"}, + {"NewParam", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"}, + {"NewPkgName", Func, 5, "func(pos token.Pos, pkg *Package, name string, imported *Package) *PkgName"}, + {"NewPointer", Func, 5, "func(elem Type) *Pointer"}, + {"NewScope", Func, 5, "func(parent *Scope, pos token.Pos, end token.Pos, comment string) *Scope"}, + {"NewSignature", Func, 5, "func(recv *Var, params *Tuple, results *Tuple, variadic bool) *Signature"}, + {"NewSignatureType", Func, 18, "func(recv *Var, recvTypeParams []*TypeParam, typeParams []*TypeParam, params *Tuple, results *Tuple, variadic bool) *Signature"}, + {"NewSlice", Func, 5, "func(elem Type) *Slice"}, + {"NewStruct", Func, 5, "func(fields []*Var, tags []string) *Struct"}, + {"NewTerm", Func, 18, "func(tilde bool, typ Type) *Term"}, + {"NewTuple", Func, 5, "func(x ...*Var) *Tuple"}, + {"NewTypeName", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *TypeName"}, + {"NewTypeParam", Func, 18, "func(obj *TypeName, constraint Type) *TypeParam"}, + {"NewUnion", Func, 18, "func(terms []*Term) *Union"}, + {"NewVar", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"}, + {"Nil", Type, 5, ""}, + {"Object", Type, 5, ""}, + {"ObjectString", Func, 5, "func(obj Object, qf Qualifier) string"}, + {"Package", Type, 5, ""}, + {"PackageVar", Const, 25, ""}, + {"ParamVar", Const, 25, ""}, + {"PkgName", Type, 5, ""}, + {"Pointer", Type, 5, ""}, + {"Qualifier", Type, 5, ""}, + {"RecvOnly", Const, 5, ""}, + {"RecvVar", Const, 25, ""}, + {"RelativeTo", Func, 5, "func(pkg *Package) Qualifier"}, + {"ResultVar", Const, 25, ""}, + {"Rune", Const, 5, ""}, + 
{"Satisfies", Func, 20, "func(V Type, T *Interface) bool"}, + {"Scope", Type, 5, ""}, + {"Selection", Type, 5, ""}, + {"SelectionKind", Type, 5, ""}, + {"SelectionString", Func, 5, "func(s *Selection, qf Qualifier) string"}, + {"SendOnly", Const, 5, ""}, + {"SendRecv", Const, 5, ""}, + {"Signature", Type, 5, ""}, + {"Sizes", Type, 5, ""}, + {"SizesFor", Func, 9, "func(compiler string, arch string) Sizes"}, + {"Slice", Type, 5, ""}, + {"StdSizes", Type, 5, ""}, + {"StdSizes.MaxAlign", Field, 5, ""}, + {"StdSizes.WordSize", Field, 5, ""}, + {"String", Const, 5, ""}, + {"Struct", Type, 5, ""}, + {"Term", Type, 18, ""}, + {"Tuple", Type, 5, ""}, + {"Typ", Var, 5, ""}, + {"Type", Type, 5, ""}, + {"TypeAndValue", Type, 5, ""}, + {"TypeAndValue.Type", Field, 5, ""}, + {"TypeAndValue.Value", Field, 5, ""}, + {"TypeList", Type, 18, ""}, + {"TypeName", Type, 5, ""}, + {"TypeParam", Type, 18, ""}, + {"TypeParamList", Type, 18, ""}, + {"TypeString", Func, 5, "func(typ Type, qf Qualifier) string"}, + {"Uint", Const, 5, ""}, + {"Uint16", Const, 5, ""}, + {"Uint32", Const, 5, ""}, + {"Uint64", Const, 5, ""}, + {"Uint8", Const, 5, ""}, + {"Uintptr", Const, 5, ""}, + {"Unalias", Func, 22, "func(t Type) Type"}, + {"Union", Type, 18, ""}, + {"Universe", Var, 5, ""}, + {"Unsafe", Var, 5, ""}, + {"UnsafePointer", Const, 5, ""}, + {"UntypedBool", Const, 5, ""}, + {"UntypedComplex", Const, 5, ""}, + {"UntypedFloat", Const, 5, ""}, + {"UntypedInt", Const, 5, ""}, + {"UntypedNil", Const, 5, ""}, + {"UntypedRune", Const, 5, ""}, + {"UntypedString", Const, 5, ""}, + {"Var", Type, 5, ""}, + {"VarKind", Type, 25, ""}, + {"WriteExpr", Func, 5, "func(buf *bytes.Buffer, x ast.Expr)"}, + {"WriteSignature", Func, 5, "func(buf *bytes.Buffer, sig *Signature, qf Qualifier)"}, + {"WriteType", Func, 5, "func(buf *bytes.Buffer, typ Type, qf Qualifier)"}, + }, + "go/version": { + {"Compare", Func, 22, "func(x string, y string) int"}, + {"IsValid", Func, 22, "func(x string) bool"}, + {"Lang", Func, 22, "func(x string) string"}, + }, + "hash": { + {"Hash", Type, 0, ""}, + {"Hash32", Type, 0, ""}, + {"Hash64", Type, 0, ""}, + }, + "hash/adler32": { + {"Checksum", Func, 0, "func(data []byte) uint32"}, + {"New", Func, 0, "func() hash.Hash32"}, + {"Size", Const, 0, ""}, + }, + "hash/crc32": { + {"Castagnoli", Const, 0, ""}, + {"Checksum", Func, 0, "func(data []byte, tab *Table) uint32"}, + {"ChecksumIEEE", Func, 0, "func(data []byte) uint32"}, + {"IEEE", Const, 0, ""}, + {"IEEETable", Var, 0, ""}, + {"Koopman", Const, 0, ""}, + {"MakeTable", Func, 0, "func(poly uint32) *Table"}, + {"New", Func, 0, "func(tab *Table) hash.Hash32"}, + {"NewIEEE", Func, 0, "func() hash.Hash32"}, + {"Size", Const, 0, ""}, + {"Table", Type, 0, ""}, + {"Update", Func, 0, "func(crc uint32, tab *Table, p []byte) uint32"}, + }, + "hash/crc64": { + {"Checksum", Func, 0, "func(data []byte, tab *Table) uint64"}, + {"ECMA", Const, 0, ""}, + {"ISO", Const, 0, ""}, + {"MakeTable", Func, 0, "func(poly uint64) *Table"}, + {"New", Func, 0, "func(tab *Table) hash.Hash64"}, + {"Size", Const, 0, ""}, + {"Table", Type, 0, ""}, + {"Update", Func, 0, "func(crc uint64, tab *Table, p []byte) uint64"}, + }, + "hash/fnv": { + {"New128", Func, 9, "func() hash.Hash"}, + {"New128a", Func, 9, "func() hash.Hash"}, + {"New32", Func, 0, "func() hash.Hash32"}, + {"New32a", Func, 0, "func() hash.Hash32"}, + {"New64", Func, 0, "func() hash.Hash64"}, + {"New64a", Func, 0, "func() hash.Hash64"}, + }, + "hash/maphash": { + {"(*Hash).BlockSize", Method, 14, ""}, + {"(*Hash).Reset", Method, 14, 
""}, + {"(*Hash).Seed", Method, 14, ""}, + {"(*Hash).SetSeed", Method, 14, ""}, + {"(*Hash).Size", Method, 14, ""}, + {"(*Hash).Sum", Method, 14, ""}, + {"(*Hash).Sum64", Method, 14, ""}, + {"(*Hash).Write", Method, 14, ""}, + {"(*Hash).WriteByte", Method, 14, ""}, + {"(*Hash).WriteString", Method, 14, ""}, + {"Bytes", Func, 19, "func(seed Seed, b []byte) uint64"}, + {"Comparable", Func, 24, "func[T comparable](seed Seed, v T) uint64"}, + {"Hash", Type, 14, ""}, + {"MakeSeed", Func, 14, "func() Seed"}, + {"Seed", Type, 14, ""}, + {"String", Func, 19, "func(seed Seed, s string) uint64"}, + {"WriteComparable", Func, 24, "func[T comparable](h *Hash, x T)"}, + }, + "html": { + {"EscapeString", Func, 0, "func(s string) string"}, + {"UnescapeString", Func, 0, "func(s string) string"}, + }, + "html/template": { + {"(*Error).Error", Method, 0, ""}, + {"(*Template).AddParseTree", Method, 0, ""}, + {"(*Template).Clone", Method, 0, ""}, + {"(*Template).DefinedTemplates", Method, 6, ""}, + {"(*Template).Delims", Method, 0, ""}, + {"(*Template).Execute", Method, 0, ""}, + {"(*Template).ExecuteTemplate", Method, 0, ""}, + {"(*Template).Funcs", Method, 0, ""}, + {"(*Template).Lookup", Method, 0, ""}, + {"(*Template).Name", Method, 0, ""}, + {"(*Template).New", Method, 0, ""}, + {"(*Template).Option", Method, 5, ""}, + {"(*Template).Parse", Method, 0, ""}, + {"(*Template).ParseFS", Method, 16, ""}, + {"(*Template).ParseFiles", Method, 0, ""}, + {"(*Template).ParseGlob", Method, 0, ""}, + {"(*Template).Templates", Method, 0, ""}, + {"CSS", Type, 0, ""}, + {"ErrAmbigContext", Const, 0, ""}, + {"ErrBadHTML", Const, 0, ""}, + {"ErrBranchEnd", Const, 0, ""}, + {"ErrEndContext", Const, 0, ""}, + {"ErrJSTemplate", Const, 21, ""}, + {"ErrNoSuchTemplate", Const, 0, ""}, + {"ErrOutputContext", Const, 0, ""}, + {"ErrPartialCharset", Const, 0, ""}, + {"ErrPartialEscape", Const, 0, ""}, + {"ErrPredefinedEscaper", Const, 9, ""}, + {"ErrRangeLoopReentry", Const, 0, ""}, + {"ErrSlashAmbig", Const, 0, ""}, + {"Error", Type, 0, ""}, + {"Error.Description", Field, 0, ""}, + {"Error.ErrorCode", Field, 0, ""}, + {"Error.Line", Field, 0, ""}, + {"Error.Name", Field, 0, ""}, + {"Error.Node", Field, 4, ""}, + {"ErrorCode", Type, 0, ""}, + {"FuncMap", Type, 0, ""}, + {"HTML", Type, 0, ""}, + {"HTMLAttr", Type, 0, ""}, + {"HTMLEscape", Func, 0, "func(w io.Writer, b []byte)"}, + {"HTMLEscapeString", Func, 0, "func(s string) string"}, + {"HTMLEscaper", Func, 0, "func(args ...any) string"}, + {"IsTrue", Func, 6, "func(val any) (truth bool, ok bool)"}, + {"JS", Type, 0, ""}, + {"JSEscape", Func, 0, "func(w io.Writer, b []byte)"}, + {"JSEscapeString", Func, 0, "func(s string) string"}, + {"JSEscaper", Func, 0, "func(args ...any) string"}, + {"JSStr", Type, 0, ""}, + {"Must", Func, 0, "func(t *Template, err error) *Template"}, + {"New", Func, 0, "func(name string) *Template"}, + {"OK", Const, 0, ""}, + {"ParseFS", Func, 16, "func(fs fs.FS, patterns ...string) (*Template, error)"}, + {"ParseFiles", Func, 0, "func(filenames ...string) (*Template, error)"}, + {"ParseGlob", Func, 0, "func(pattern string) (*Template, error)"}, + {"Srcset", Type, 10, ""}, + {"Template", Type, 0, ""}, + {"Template.Tree", Field, 2, ""}, + {"URL", Type, 0, ""}, + {"URLQueryEscaper", Func, 0, "func(args ...any) string"}, + }, + "image": { + {"(*Alpha).AlphaAt", Method, 4, ""}, + {"(*Alpha).At", Method, 0, ""}, + {"(*Alpha).Bounds", Method, 0, ""}, + {"(*Alpha).ColorModel", Method, 0, ""}, + {"(*Alpha).Opaque", Method, 0, ""}, + {"(*Alpha).PixOffset", Method, 0, 
""}, + {"(*Alpha).RGBA64At", Method, 17, ""}, + {"(*Alpha).Set", Method, 0, ""}, + {"(*Alpha).SetAlpha", Method, 0, ""}, + {"(*Alpha).SetRGBA64", Method, 17, ""}, + {"(*Alpha).SubImage", Method, 0, ""}, + {"(*Alpha16).Alpha16At", Method, 4, ""}, + {"(*Alpha16).At", Method, 0, ""}, + {"(*Alpha16).Bounds", Method, 0, ""}, + {"(*Alpha16).ColorModel", Method, 0, ""}, + {"(*Alpha16).Opaque", Method, 0, ""}, + {"(*Alpha16).PixOffset", Method, 0, ""}, + {"(*Alpha16).RGBA64At", Method, 17, ""}, + {"(*Alpha16).Set", Method, 0, ""}, + {"(*Alpha16).SetAlpha16", Method, 0, ""}, + {"(*Alpha16).SetRGBA64", Method, 17, ""}, + {"(*Alpha16).SubImage", Method, 0, ""}, + {"(*CMYK).At", Method, 5, ""}, + {"(*CMYK).Bounds", Method, 5, ""}, + {"(*CMYK).CMYKAt", Method, 5, ""}, + {"(*CMYK).ColorModel", Method, 5, ""}, + {"(*CMYK).Opaque", Method, 5, ""}, + {"(*CMYK).PixOffset", Method, 5, ""}, + {"(*CMYK).RGBA64At", Method, 17, ""}, + {"(*CMYK).Set", Method, 5, ""}, + {"(*CMYK).SetCMYK", Method, 5, ""}, + {"(*CMYK).SetRGBA64", Method, 17, ""}, + {"(*CMYK).SubImage", Method, 5, ""}, + {"(*Gray).At", Method, 0, ""}, + {"(*Gray).Bounds", Method, 0, ""}, + {"(*Gray).ColorModel", Method, 0, ""}, + {"(*Gray).GrayAt", Method, 4, ""}, + {"(*Gray).Opaque", Method, 0, ""}, + {"(*Gray).PixOffset", Method, 0, ""}, + {"(*Gray).RGBA64At", Method, 17, ""}, + {"(*Gray).Set", Method, 0, ""}, + {"(*Gray).SetGray", Method, 0, ""}, + {"(*Gray).SetRGBA64", Method, 17, ""}, + {"(*Gray).SubImage", Method, 0, ""}, + {"(*Gray16).At", Method, 0, ""}, + {"(*Gray16).Bounds", Method, 0, ""}, + {"(*Gray16).ColorModel", Method, 0, ""}, + {"(*Gray16).Gray16At", Method, 4, ""}, + {"(*Gray16).Opaque", Method, 0, ""}, + {"(*Gray16).PixOffset", Method, 0, ""}, + {"(*Gray16).RGBA64At", Method, 17, ""}, + {"(*Gray16).Set", Method, 0, ""}, + {"(*Gray16).SetGray16", Method, 0, ""}, + {"(*Gray16).SetRGBA64", Method, 17, ""}, + {"(*Gray16).SubImage", Method, 0, ""}, + {"(*NRGBA).At", Method, 0, ""}, + {"(*NRGBA).Bounds", Method, 0, ""}, + {"(*NRGBA).ColorModel", Method, 0, ""}, + {"(*NRGBA).NRGBAAt", Method, 4, ""}, + {"(*NRGBA).Opaque", Method, 0, ""}, + {"(*NRGBA).PixOffset", Method, 0, ""}, + {"(*NRGBA).RGBA64At", Method, 17, ""}, + {"(*NRGBA).Set", Method, 0, ""}, + {"(*NRGBA).SetNRGBA", Method, 0, ""}, + {"(*NRGBA).SetRGBA64", Method, 17, ""}, + {"(*NRGBA).SubImage", Method, 0, ""}, + {"(*NRGBA64).At", Method, 0, ""}, + {"(*NRGBA64).Bounds", Method, 0, ""}, + {"(*NRGBA64).ColorModel", Method, 0, ""}, + {"(*NRGBA64).NRGBA64At", Method, 4, ""}, + {"(*NRGBA64).Opaque", Method, 0, ""}, + {"(*NRGBA64).PixOffset", Method, 0, ""}, + {"(*NRGBA64).RGBA64At", Method, 17, ""}, + {"(*NRGBA64).Set", Method, 0, ""}, + {"(*NRGBA64).SetNRGBA64", Method, 0, ""}, + {"(*NRGBA64).SetRGBA64", Method, 17, ""}, + {"(*NRGBA64).SubImage", Method, 0, ""}, + {"(*NYCbCrA).AOffset", Method, 6, ""}, + {"(*NYCbCrA).At", Method, 6, ""}, + {"(*NYCbCrA).Bounds", Method, 6, ""}, + {"(*NYCbCrA).COffset", Method, 6, ""}, + {"(*NYCbCrA).ColorModel", Method, 6, ""}, + {"(*NYCbCrA).NYCbCrAAt", Method, 6, ""}, + {"(*NYCbCrA).Opaque", Method, 6, ""}, + {"(*NYCbCrA).RGBA64At", Method, 17, ""}, + {"(*NYCbCrA).SubImage", Method, 6, ""}, + {"(*NYCbCrA).YCbCrAt", Method, 6, ""}, + {"(*NYCbCrA).YOffset", Method, 6, ""}, + {"(*Paletted).At", Method, 0, ""}, + {"(*Paletted).Bounds", Method, 0, ""}, + {"(*Paletted).ColorIndexAt", Method, 0, ""}, + {"(*Paletted).ColorModel", Method, 0, ""}, + {"(*Paletted).Opaque", Method, 0, ""}, + {"(*Paletted).PixOffset", Method, 0, ""}, + 
{"(*Paletted).RGBA64At", Method, 17, ""}, + {"(*Paletted).Set", Method, 0, ""}, + {"(*Paletted).SetColorIndex", Method, 0, ""}, + {"(*Paletted).SetRGBA64", Method, 17, ""}, + {"(*Paletted).SubImage", Method, 0, ""}, + {"(*RGBA).At", Method, 0, ""}, + {"(*RGBA).Bounds", Method, 0, ""}, + {"(*RGBA).ColorModel", Method, 0, ""}, + {"(*RGBA).Opaque", Method, 0, ""}, + {"(*RGBA).PixOffset", Method, 0, ""}, + {"(*RGBA).RGBA64At", Method, 17, ""}, + {"(*RGBA).RGBAAt", Method, 4, ""}, + {"(*RGBA).Set", Method, 0, ""}, + {"(*RGBA).SetRGBA", Method, 0, ""}, + {"(*RGBA).SetRGBA64", Method, 17, ""}, + {"(*RGBA).SubImage", Method, 0, ""}, + {"(*RGBA64).At", Method, 0, ""}, + {"(*RGBA64).Bounds", Method, 0, ""}, + {"(*RGBA64).ColorModel", Method, 0, ""}, + {"(*RGBA64).Opaque", Method, 0, ""}, + {"(*RGBA64).PixOffset", Method, 0, ""}, + {"(*RGBA64).RGBA64At", Method, 4, ""}, + {"(*RGBA64).Set", Method, 0, ""}, + {"(*RGBA64).SetRGBA64", Method, 0, ""}, + {"(*RGBA64).SubImage", Method, 0, ""}, + {"(*Uniform).At", Method, 0, ""}, + {"(*Uniform).Bounds", Method, 0, ""}, + {"(*Uniform).ColorModel", Method, 0, ""}, + {"(*Uniform).Convert", Method, 0, ""}, + {"(*Uniform).Opaque", Method, 0, ""}, + {"(*Uniform).RGBA", Method, 0, ""}, + {"(*Uniform).RGBA64At", Method, 17, ""}, + {"(*YCbCr).At", Method, 0, ""}, + {"(*YCbCr).Bounds", Method, 0, ""}, + {"(*YCbCr).COffset", Method, 0, ""}, + {"(*YCbCr).ColorModel", Method, 0, ""}, + {"(*YCbCr).Opaque", Method, 0, ""}, + {"(*YCbCr).RGBA64At", Method, 17, ""}, + {"(*YCbCr).SubImage", Method, 0, ""}, + {"(*YCbCr).YCbCrAt", Method, 4, ""}, + {"(*YCbCr).YOffset", Method, 0, ""}, + {"(Point).Add", Method, 0, ""}, + {"(Point).Div", Method, 0, ""}, + {"(Point).Eq", Method, 0, ""}, + {"(Point).In", Method, 0, ""}, + {"(Point).Mod", Method, 0, ""}, + {"(Point).Mul", Method, 0, ""}, + {"(Point).String", Method, 0, ""}, + {"(Point).Sub", Method, 0, ""}, + {"(Rectangle).Add", Method, 0, ""}, + {"(Rectangle).At", Method, 5, ""}, + {"(Rectangle).Bounds", Method, 5, ""}, + {"(Rectangle).Canon", Method, 0, ""}, + {"(Rectangle).ColorModel", Method, 5, ""}, + {"(Rectangle).Dx", Method, 0, ""}, + {"(Rectangle).Dy", Method, 0, ""}, + {"(Rectangle).Empty", Method, 0, ""}, + {"(Rectangle).Eq", Method, 0, ""}, + {"(Rectangle).In", Method, 0, ""}, + {"(Rectangle).Inset", Method, 0, ""}, + {"(Rectangle).Intersect", Method, 0, ""}, + {"(Rectangle).Overlaps", Method, 0, ""}, + {"(Rectangle).RGBA64At", Method, 17, ""}, + {"(Rectangle).Size", Method, 0, ""}, + {"(Rectangle).String", Method, 0, ""}, + {"(Rectangle).Sub", Method, 0, ""}, + {"(Rectangle).Union", Method, 0, ""}, + {"(YCbCrSubsampleRatio).String", Method, 0, ""}, + {"Alpha", Type, 0, ""}, + {"Alpha.Pix", Field, 0, ""}, + {"Alpha.Rect", Field, 0, ""}, + {"Alpha.Stride", Field, 0, ""}, + {"Alpha16", Type, 0, ""}, + {"Alpha16.Pix", Field, 0, ""}, + {"Alpha16.Rect", Field, 0, ""}, + {"Alpha16.Stride", Field, 0, ""}, + {"Black", Var, 0, ""}, + {"CMYK", Type, 5, ""}, + {"CMYK.Pix", Field, 5, ""}, + {"CMYK.Rect", Field, 5, ""}, + {"CMYK.Stride", Field, 5, ""}, + {"Config", Type, 0, ""}, + {"Config.ColorModel", Field, 0, ""}, + {"Config.Height", Field, 0, ""}, + {"Config.Width", Field, 0, ""}, + {"Decode", Func, 0, "func(r io.Reader) (Image, string, error)"}, + {"DecodeConfig", Func, 0, "func(r io.Reader) (Config, string, error)"}, + {"ErrFormat", Var, 0, ""}, + {"Gray", Type, 0, ""}, + {"Gray.Pix", Field, 0, ""}, + {"Gray.Rect", Field, 0, ""}, + {"Gray.Stride", Field, 0, ""}, + {"Gray16", Type, 0, ""}, + {"Gray16.Pix", Field, 0, ""}, + 
{"Gray16.Rect", Field, 0, ""}, + {"Gray16.Stride", Field, 0, ""}, + {"Image", Type, 0, ""}, + {"NRGBA", Type, 0, ""}, + {"NRGBA.Pix", Field, 0, ""}, + {"NRGBA.Rect", Field, 0, ""}, + {"NRGBA.Stride", Field, 0, ""}, + {"NRGBA64", Type, 0, ""}, + {"NRGBA64.Pix", Field, 0, ""}, + {"NRGBA64.Rect", Field, 0, ""}, + {"NRGBA64.Stride", Field, 0, ""}, + {"NYCbCrA", Type, 6, ""}, + {"NYCbCrA.A", Field, 6, ""}, + {"NYCbCrA.AStride", Field, 6, ""}, + {"NYCbCrA.YCbCr", Field, 6, ""}, + {"NewAlpha", Func, 0, "func(r Rectangle) *Alpha"}, + {"NewAlpha16", Func, 0, "func(r Rectangle) *Alpha16"}, + {"NewCMYK", Func, 5, "func(r Rectangle) *CMYK"}, + {"NewGray", Func, 0, "func(r Rectangle) *Gray"}, + {"NewGray16", Func, 0, "func(r Rectangle) *Gray16"}, + {"NewNRGBA", Func, 0, "func(r Rectangle) *NRGBA"}, + {"NewNRGBA64", Func, 0, "func(r Rectangle) *NRGBA64"}, + {"NewNYCbCrA", Func, 6, "func(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *NYCbCrA"}, + {"NewPaletted", Func, 0, "func(r Rectangle, p color.Palette) *Paletted"}, + {"NewRGBA", Func, 0, "func(r Rectangle) *RGBA"}, + {"NewRGBA64", Func, 0, "func(r Rectangle) *RGBA64"}, + {"NewUniform", Func, 0, "func(c color.Color) *Uniform"}, + {"NewYCbCr", Func, 0, "func(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *YCbCr"}, + {"Opaque", Var, 0, ""}, + {"Paletted", Type, 0, ""}, + {"Paletted.Palette", Field, 0, ""}, + {"Paletted.Pix", Field, 0, ""}, + {"Paletted.Rect", Field, 0, ""}, + {"Paletted.Stride", Field, 0, ""}, + {"PalettedImage", Type, 0, ""}, + {"Point", Type, 0, ""}, + {"Point.X", Field, 0, ""}, + {"Point.Y", Field, 0, ""}, + {"Pt", Func, 0, "func(X int, Y int) Point"}, + {"RGBA", Type, 0, ""}, + {"RGBA.Pix", Field, 0, ""}, + {"RGBA.Rect", Field, 0, ""}, + {"RGBA.Stride", Field, 0, ""}, + {"RGBA64", Type, 0, ""}, + {"RGBA64.Pix", Field, 0, ""}, + {"RGBA64.Rect", Field, 0, ""}, + {"RGBA64.Stride", Field, 0, ""}, + {"RGBA64Image", Type, 17, ""}, + {"Rect", Func, 0, "func(x0 int, y0 int, x1 int, y1 int) Rectangle"}, + {"Rectangle", Type, 0, ""}, + {"Rectangle.Max", Field, 0, ""}, + {"Rectangle.Min", Field, 0, ""}, + {"RegisterFormat", Func, 0, "func(name string, magic string, decode func(io.Reader) (Image, error), decodeConfig func(io.Reader) (Config, error))"}, + {"Transparent", Var, 0, ""}, + {"Uniform", Type, 0, ""}, + {"Uniform.C", Field, 0, ""}, + {"White", Var, 0, ""}, + {"YCbCr", Type, 0, ""}, + {"YCbCr.CStride", Field, 0, ""}, + {"YCbCr.Cb", Field, 0, ""}, + {"YCbCr.Cr", Field, 0, ""}, + {"YCbCr.Rect", Field, 0, ""}, + {"YCbCr.SubsampleRatio", Field, 0, ""}, + {"YCbCr.Y", Field, 0, ""}, + {"YCbCr.YStride", Field, 0, ""}, + {"YCbCrSubsampleRatio", Type, 0, ""}, + {"YCbCrSubsampleRatio410", Const, 5, ""}, + {"YCbCrSubsampleRatio411", Const, 5, ""}, + {"YCbCrSubsampleRatio420", Const, 0, ""}, + {"YCbCrSubsampleRatio422", Const, 0, ""}, + {"YCbCrSubsampleRatio440", Const, 1, ""}, + {"YCbCrSubsampleRatio444", Const, 0, ""}, + {"ZP", Var, 0, ""}, + {"ZR", Var, 0, ""}, + }, + "image/color": { + {"(Alpha).RGBA", Method, 0, ""}, + {"(Alpha16).RGBA", Method, 0, ""}, + {"(CMYK).RGBA", Method, 5, ""}, + {"(Gray).RGBA", Method, 0, ""}, + {"(Gray16).RGBA", Method, 0, ""}, + {"(NRGBA).RGBA", Method, 0, ""}, + {"(NRGBA64).RGBA", Method, 0, ""}, + {"(NYCbCrA).RGBA", Method, 6, ""}, + {"(Palette).Convert", Method, 0, ""}, + {"(Palette).Index", Method, 0, ""}, + {"(RGBA).RGBA", Method, 0, ""}, + {"(RGBA64).RGBA", Method, 0, ""}, + {"(YCbCr).RGBA", Method, 0, ""}, + {"Alpha", Type, 0, ""}, + {"Alpha.A", Field, 0, ""}, + {"Alpha16", Type, 0, ""}, + {"Alpha16.A", 
Field, 0, ""}, + {"Alpha16Model", Var, 0, ""}, + {"AlphaModel", Var, 0, ""}, + {"Black", Var, 0, ""}, + {"CMYK", Type, 5, ""}, + {"CMYK.C", Field, 5, ""}, + {"CMYK.K", Field, 5, ""}, + {"CMYK.M", Field, 5, ""}, + {"CMYK.Y", Field, 5, ""}, + {"CMYKModel", Var, 5, ""}, + {"CMYKToRGB", Func, 5, "func(c uint8, m uint8, y uint8, k uint8) (uint8, uint8, uint8)"}, + {"Color", Type, 0, ""}, + {"Gray", Type, 0, ""}, + {"Gray.Y", Field, 0, ""}, + {"Gray16", Type, 0, ""}, + {"Gray16.Y", Field, 0, ""}, + {"Gray16Model", Var, 0, ""}, + {"GrayModel", Var, 0, ""}, + {"Model", Type, 0, ""}, + {"ModelFunc", Func, 0, "func(f func(Color) Color) Model"}, + {"NRGBA", Type, 0, ""}, + {"NRGBA.A", Field, 0, ""}, + {"NRGBA.B", Field, 0, ""}, + {"NRGBA.G", Field, 0, ""}, + {"NRGBA.R", Field, 0, ""}, + {"NRGBA64", Type, 0, ""}, + {"NRGBA64.A", Field, 0, ""}, + {"NRGBA64.B", Field, 0, ""}, + {"NRGBA64.G", Field, 0, ""}, + {"NRGBA64.R", Field, 0, ""}, + {"NRGBA64Model", Var, 0, ""}, + {"NRGBAModel", Var, 0, ""}, + {"NYCbCrA", Type, 6, ""}, + {"NYCbCrA.A", Field, 6, ""}, + {"NYCbCrA.YCbCr", Field, 6, ""}, + {"NYCbCrAModel", Var, 6, ""}, + {"Opaque", Var, 0, ""}, + {"Palette", Type, 0, ""}, + {"RGBA", Type, 0, ""}, + {"RGBA.A", Field, 0, ""}, + {"RGBA.B", Field, 0, ""}, + {"RGBA.G", Field, 0, ""}, + {"RGBA.R", Field, 0, ""}, + {"RGBA64", Type, 0, ""}, + {"RGBA64.A", Field, 0, ""}, + {"RGBA64.B", Field, 0, ""}, + {"RGBA64.G", Field, 0, ""}, + {"RGBA64.R", Field, 0, ""}, + {"RGBA64Model", Var, 0, ""}, + {"RGBAModel", Var, 0, ""}, + {"RGBToCMYK", Func, 5, "func(r uint8, g uint8, b uint8) (uint8, uint8, uint8, uint8)"}, + {"RGBToYCbCr", Func, 0, "func(r uint8, g uint8, b uint8) (uint8, uint8, uint8)"}, + {"Transparent", Var, 0, ""}, + {"White", Var, 0, ""}, + {"YCbCr", Type, 0, ""}, + {"YCbCr.Cb", Field, 0, ""}, + {"YCbCr.Cr", Field, 0, ""}, + {"YCbCr.Y", Field, 0, ""}, + {"YCbCrModel", Var, 0, ""}, + {"YCbCrToRGB", Func, 0, "func(y uint8, cb uint8, cr uint8) (uint8, uint8, uint8)"}, + }, + "image/color/palette": { + {"Plan9", Var, 2, ""}, + {"WebSafe", Var, 2, ""}, + }, + "image/draw": { + {"(Op).Draw", Method, 2, ""}, + {"Draw", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op)"}, + {"DrawMask", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op)"}, + {"Drawer", Type, 2, ""}, + {"FloydSteinberg", Var, 2, ""}, + {"Image", Type, 0, ""}, + {"Op", Type, 0, ""}, + {"Over", Const, 0, ""}, + {"Quantizer", Type, 2, ""}, + {"RGBA64Image", Type, 17, ""}, + {"Src", Const, 0, ""}, + }, + "image/gif": { + {"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"}, + {"DecodeAll", Func, 0, "func(r io.Reader) (*GIF, error)"}, + {"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"}, + {"DisposalBackground", Const, 5, ""}, + {"DisposalNone", Const, 5, ""}, + {"DisposalPrevious", Const, 5, ""}, + {"Encode", Func, 2, "func(w io.Writer, m image.Image, o *Options) error"}, + {"EncodeAll", Func, 2, "func(w io.Writer, g *GIF) error"}, + {"GIF", Type, 0, ""}, + {"GIF.BackgroundIndex", Field, 5, ""}, + {"GIF.Config", Field, 5, ""}, + {"GIF.Delay", Field, 0, ""}, + {"GIF.Disposal", Field, 5, ""}, + {"GIF.Image", Field, 0, ""}, + {"GIF.LoopCount", Field, 0, ""}, + {"Options", Type, 2, ""}, + {"Options.Drawer", Field, 2, ""}, + {"Options.NumColors", Field, 2, ""}, + {"Options.Quantizer", Field, 2, ""}, + }, + "image/jpeg": { + {"(FormatError).Error", Method, 0, ""}, + {"(UnsupportedError).Error", Method, 0, ""}, + {"Decode", Func, 
0, "func(r io.Reader) (image.Image, error)"}, + {"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"}, + {"DefaultQuality", Const, 0, ""}, + {"Encode", Func, 0, "func(w io.Writer, m image.Image, o *Options) error"}, + {"FormatError", Type, 0, ""}, + {"Options", Type, 0, ""}, + {"Options.Quality", Field, 0, ""}, + {"Reader", Type, 0, ""}, + {"UnsupportedError", Type, 0, ""}, + }, + "image/png": { + {"(*Encoder).Encode", Method, 4, ""}, + {"(FormatError).Error", Method, 0, ""}, + {"(UnsupportedError).Error", Method, 0, ""}, + {"BestCompression", Const, 4, ""}, + {"BestSpeed", Const, 4, ""}, + {"CompressionLevel", Type, 4, ""}, + {"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"}, + {"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"}, + {"DefaultCompression", Const, 4, ""}, + {"Encode", Func, 0, "func(w io.Writer, m image.Image) error"}, + {"Encoder", Type, 4, ""}, + {"Encoder.BufferPool", Field, 9, ""}, + {"Encoder.CompressionLevel", Field, 4, ""}, + {"EncoderBuffer", Type, 9, ""}, + {"EncoderBufferPool", Type, 9, ""}, + {"FormatError", Type, 0, ""}, + {"NoCompression", Const, 4, ""}, + {"UnsupportedError", Type, 0, ""}, + }, + "index/suffixarray": { + {"(*Index).Bytes", Method, 0, ""}, + {"(*Index).FindAllIndex", Method, 0, ""}, + {"(*Index).Lookup", Method, 0, ""}, + {"(*Index).Read", Method, 0, ""}, + {"(*Index).Write", Method, 0, ""}, + {"Index", Type, 0, ""}, + {"New", Func, 0, "func(data []byte) *Index"}, + }, + "io": { + {"(*LimitedReader).Read", Method, 0, ""}, + {"(*OffsetWriter).Seek", Method, 20, ""}, + {"(*OffsetWriter).Write", Method, 20, ""}, + {"(*OffsetWriter).WriteAt", Method, 20, ""}, + {"(*PipeReader).Close", Method, 0, ""}, + {"(*PipeReader).CloseWithError", Method, 0, ""}, + {"(*PipeReader).Read", Method, 0, ""}, + {"(*PipeWriter).Close", Method, 0, ""}, + {"(*PipeWriter).CloseWithError", Method, 0, ""}, + {"(*PipeWriter).Write", Method, 0, ""}, + {"(*SectionReader).Outer", Method, 22, ""}, + {"(*SectionReader).Read", Method, 0, ""}, + {"(*SectionReader).ReadAt", Method, 0, ""}, + {"(*SectionReader).Seek", Method, 0, ""}, + {"(*SectionReader).Size", Method, 0, ""}, + {"ByteReader", Type, 0, ""}, + {"ByteScanner", Type, 0, ""}, + {"ByteWriter", Type, 1, ""}, + {"Closer", Type, 0, ""}, + {"Copy", Func, 0, "func(dst Writer, src Reader) (written int64, err error)"}, + {"CopyBuffer", Func, 5, "func(dst Writer, src Reader, buf []byte) (written int64, err error)"}, + {"CopyN", Func, 0, "func(dst Writer, src Reader, n int64) (written int64, err error)"}, + {"Discard", Var, 16, ""}, + {"EOF", Var, 0, ""}, + {"ErrClosedPipe", Var, 0, ""}, + {"ErrNoProgress", Var, 1, ""}, + {"ErrShortBuffer", Var, 0, ""}, + {"ErrShortWrite", Var, 0, ""}, + {"ErrUnexpectedEOF", Var, 0, ""}, + {"LimitReader", Func, 0, "func(r Reader, n int64) Reader"}, + {"LimitedReader", Type, 0, ""}, + {"LimitedReader.N", Field, 0, ""}, + {"LimitedReader.R", Field, 0, ""}, + {"MultiReader", Func, 0, "func(readers ...Reader) Reader"}, + {"MultiWriter", Func, 0, "func(writers ...Writer) Writer"}, + {"NewOffsetWriter", Func, 20, "func(w WriterAt, off int64) *OffsetWriter"}, + {"NewSectionReader", Func, 0, "func(r ReaderAt, off int64, n int64) *SectionReader"}, + {"NopCloser", Func, 16, "func(r Reader) ReadCloser"}, + {"OffsetWriter", Type, 20, ""}, + {"Pipe", Func, 0, "func() (*PipeReader, *PipeWriter)"}, + {"PipeReader", Type, 0, ""}, + {"PipeWriter", Type, 0, ""}, + {"ReadAll", Func, 16, "func(r Reader) ([]byte, error)"}, + {"ReadAtLeast", Func, 0, "func(r Reader, buf 
[]byte, min int) (n int, err error)"}, + {"ReadCloser", Type, 0, ""}, + {"ReadFull", Func, 0, "func(r Reader, buf []byte) (n int, err error)"}, + {"ReadSeekCloser", Type, 16, ""}, + {"ReadSeeker", Type, 0, ""}, + {"ReadWriteCloser", Type, 0, ""}, + {"ReadWriteSeeker", Type, 0, ""}, + {"ReadWriter", Type, 0, ""}, + {"Reader", Type, 0, ""}, + {"ReaderAt", Type, 0, ""}, + {"ReaderFrom", Type, 0, ""}, + {"RuneReader", Type, 0, ""}, + {"RuneScanner", Type, 0, ""}, + {"SectionReader", Type, 0, ""}, + {"SeekCurrent", Const, 7, ""}, + {"SeekEnd", Const, 7, ""}, + {"SeekStart", Const, 7, ""}, + {"Seeker", Type, 0, ""}, + {"StringWriter", Type, 12, ""}, + {"TeeReader", Func, 0, "func(r Reader, w Writer) Reader"}, + {"WriteCloser", Type, 0, ""}, + {"WriteSeeker", Type, 0, ""}, + {"WriteString", Func, 0, "func(w Writer, s string) (n int, err error)"}, + {"Writer", Type, 0, ""}, + {"WriterAt", Type, 0, ""}, + {"WriterTo", Type, 0, ""}, + }, + "io/fs": { + {"(*PathError).Error", Method, 16, ""}, + {"(*PathError).Timeout", Method, 16, ""}, + {"(*PathError).Unwrap", Method, 16, ""}, + {"(FileMode).IsDir", Method, 16, ""}, + {"(FileMode).IsRegular", Method, 16, ""}, + {"(FileMode).Perm", Method, 16, ""}, + {"(FileMode).String", Method, 16, ""}, + {"(FileMode).Type", Method, 16, ""}, + {"DirEntry", Type, 16, ""}, + {"ErrClosed", Var, 16, ""}, + {"ErrExist", Var, 16, ""}, + {"ErrInvalid", Var, 16, ""}, + {"ErrNotExist", Var, 16, ""}, + {"ErrPermission", Var, 16, ""}, + {"FS", Type, 16, ""}, + {"File", Type, 16, ""}, + {"FileInfo", Type, 16, ""}, + {"FileInfoToDirEntry", Func, 17, "func(info FileInfo) DirEntry"}, + {"FileMode", Type, 16, ""}, + {"FormatDirEntry", Func, 21, "func(dir DirEntry) string"}, + {"FormatFileInfo", Func, 21, "func(info FileInfo) string"}, + {"Glob", Func, 16, "func(fsys FS, pattern string) (matches []string, err error)"}, + {"GlobFS", Type, 16, ""}, + {"Lstat", Func, 25, ""}, + {"ModeAppend", Const, 16, ""}, + {"ModeCharDevice", Const, 16, ""}, + {"ModeDevice", Const, 16, ""}, + {"ModeDir", Const, 16, ""}, + {"ModeExclusive", Const, 16, ""}, + {"ModeIrregular", Const, 16, ""}, + {"ModeNamedPipe", Const, 16, ""}, + {"ModePerm", Const, 16, ""}, + {"ModeSetgid", Const, 16, ""}, + {"ModeSetuid", Const, 16, ""}, + {"ModeSocket", Const, 16, ""}, + {"ModeSticky", Const, 16, ""}, + {"ModeSymlink", Const, 16, ""}, + {"ModeTemporary", Const, 16, ""}, + {"ModeType", Const, 16, ""}, + {"PathError", Type, 16, ""}, + {"PathError.Err", Field, 16, ""}, + {"PathError.Op", Field, 16, ""}, + {"PathError.Path", Field, 16, ""}, + {"ReadDir", Func, 16, "func(fsys FS, name string) ([]DirEntry, error)"}, + {"ReadDirFS", Type, 16, ""}, + {"ReadDirFile", Type, 16, ""}, + {"ReadFile", Func, 16, "func(fsys FS, name string) ([]byte, error)"}, + {"ReadFileFS", Type, 16, ""}, + {"ReadLink", Func, 25, ""}, + {"ReadLinkFS", Type, 25, ""}, + {"SkipAll", Var, 20, ""}, + {"SkipDir", Var, 16, ""}, + {"Stat", Func, 16, "func(fsys FS, name string) (FileInfo, error)"}, + {"StatFS", Type, 16, ""}, + {"Sub", Func, 16, "func(fsys FS, dir string) (FS, error)"}, + {"SubFS", Type, 16, ""}, + {"ValidPath", Func, 16, "func(name string) bool"}, + {"WalkDir", Func, 16, "func(fsys FS, root string, fn WalkDirFunc) error"}, + {"WalkDirFunc", Type, 16, ""}, + }, + "io/ioutil": { + {"Discard", Var, 0, ""}, + {"NopCloser", Func, 0, "func(r io.Reader) io.ReadCloser"}, + {"ReadAll", Func, 0, "func(r io.Reader) ([]byte, error)"}, + {"ReadDir", Func, 0, "func(dirname string) ([]fs.FileInfo, error)"}, + {"ReadFile", Func, 0, "func(filename 
string) ([]byte, error)"}, + {"TempDir", Func, 0, "func(dir string, pattern string) (name string, err error)"}, + {"TempFile", Func, 0, "func(dir string, pattern string) (f *os.File, err error)"}, + {"WriteFile", Func, 0, "func(filename string, data []byte, perm fs.FileMode) error"}, + }, + "iter": { + {"Pull", Func, 23, "func[V any](seq Seq[V]) (next func() (V, bool), stop func())"}, + {"Pull2", Func, 23, "func[K, V any](seq Seq2[K, V]) (next func() (K, V, bool), stop func())"}, + {"Seq", Type, 23, ""}, + {"Seq2", Type, 23, ""}, + }, + "log": { + {"(*Logger).Fatal", Method, 0, ""}, + {"(*Logger).Fatalf", Method, 0, ""}, + {"(*Logger).Fatalln", Method, 0, ""}, + {"(*Logger).Flags", Method, 0, ""}, + {"(*Logger).Output", Method, 0, ""}, + {"(*Logger).Panic", Method, 0, ""}, + {"(*Logger).Panicf", Method, 0, ""}, + {"(*Logger).Panicln", Method, 0, ""}, + {"(*Logger).Prefix", Method, 0, ""}, + {"(*Logger).Print", Method, 0, ""}, + {"(*Logger).Printf", Method, 0, ""}, + {"(*Logger).Println", Method, 0, ""}, + {"(*Logger).SetFlags", Method, 0, ""}, + {"(*Logger).SetOutput", Method, 5, ""}, + {"(*Logger).SetPrefix", Method, 0, ""}, + {"(*Logger).Writer", Method, 12, ""}, + {"Default", Func, 16, "func() *Logger"}, + {"Fatal", Func, 0, "func(v ...any)"}, + {"Fatalf", Func, 0, "func(format string, v ...any)"}, + {"Fatalln", Func, 0, "func(v ...any)"}, + {"Flags", Func, 0, "func() int"}, + {"LUTC", Const, 5, ""}, + {"Ldate", Const, 0, ""}, + {"Llongfile", Const, 0, ""}, + {"Lmicroseconds", Const, 0, ""}, + {"Lmsgprefix", Const, 14, ""}, + {"Logger", Type, 0, ""}, + {"Lshortfile", Const, 0, ""}, + {"LstdFlags", Const, 0, ""}, + {"Ltime", Const, 0, ""}, + {"New", Func, 0, "func(out io.Writer, prefix string, flag int) *Logger"}, + {"Output", Func, 5, "func(calldepth int, s string) error"}, + {"Panic", Func, 0, "func(v ...any)"}, + {"Panicf", Func, 0, "func(format string, v ...any)"}, + {"Panicln", Func, 0, "func(v ...any)"}, + {"Prefix", Func, 0, "func() string"}, + {"Print", Func, 0, "func(v ...any)"}, + {"Printf", Func, 0, "func(format string, v ...any)"}, + {"Println", Func, 0, "func(v ...any)"}, + {"SetFlags", Func, 0, "func(flag int)"}, + {"SetOutput", Func, 0, "func(w io.Writer)"}, + {"SetPrefix", Func, 0, "func(prefix string)"}, + {"Writer", Func, 13, "func() io.Writer"}, + }, + "log/slog": { + {"(*JSONHandler).Enabled", Method, 21, ""}, + {"(*JSONHandler).Handle", Method, 21, ""}, + {"(*JSONHandler).WithAttrs", Method, 21, ""}, + {"(*JSONHandler).WithGroup", Method, 21, ""}, + {"(*Level).UnmarshalJSON", Method, 21, ""}, + {"(*Level).UnmarshalText", Method, 21, ""}, + {"(*LevelVar).AppendText", Method, 24, ""}, + {"(*LevelVar).Level", Method, 21, ""}, + {"(*LevelVar).MarshalText", Method, 21, ""}, + {"(*LevelVar).Set", Method, 21, ""}, + {"(*LevelVar).String", Method, 21, ""}, + {"(*LevelVar).UnmarshalText", Method, 21, ""}, + {"(*Logger).Debug", Method, 21, ""}, + {"(*Logger).DebugContext", Method, 21, ""}, + {"(*Logger).Enabled", Method, 21, ""}, + {"(*Logger).Error", Method, 21, ""}, + {"(*Logger).ErrorContext", Method, 21, ""}, + {"(*Logger).Handler", Method, 21, ""}, + {"(*Logger).Info", Method, 21, ""}, + {"(*Logger).InfoContext", Method, 21, ""}, + {"(*Logger).Log", Method, 21, ""}, + {"(*Logger).LogAttrs", Method, 21, ""}, + {"(*Logger).Warn", Method, 21, ""}, + {"(*Logger).WarnContext", Method, 21, ""}, + {"(*Logger).With", Method, 21, ""}, + {"(*Logger).WithGroup", Method, 21, ""}, + {"(*Record).Add", Method, 21, ""}, + {"(*Record).AddAttrs", Method, 21, ""}, + 
{"(*TextHandler).Enabled", Method, 21, ""}, + {"(*TextHandler).Handle", Method, 21, ""}, + {"(*TextHandler).WithAttrs", Method, 21, ""}, + {"(*TextHandler).WithGroup", Method, 21, ""}, + {"(Attr).Equal", Method, 21, ""}, + {"(Attr).String", Method, 21, ""}, + {"(Kind).String", Method, 21, ""}, + {"(Level).AppendText", Method, 24, ""}, + {"(Level).Level", Method, 21, ""}, + {"(Level).MarshalJSON", Method, 21, ""}, + {"(Level).MarshalText", Method, 21, ""}, + {"(Level).String", Method, 21, ""}, + {"(Record).Attrs", Method, 21, ""}, + {"(Record).Clone", Method, 21, ""}, + {"(Record).NumAttrs", Method, 21, ""}, + {"(Value).Any", Method, 21, ""}, + {"(Value).Bool", Method, 21, ""}, + {"(Value).Duration", Method, 21, ""}, + {"(Value).Equal", Method, 21, ""}, + {"(Value).Float64", Method, 21, ""}, + {"(Value).Group", Method, 21, ""}, + {"(Value).Int64", Method, 21, ""}, + {"(Value).Kind", Method, 21, ""}, + {"(Value).LogValuer", Method, 21, ""}, + {"(Value).Resolve", Method, 21, ""}, + {"(Value).String", Method, 21, ""}, + {"(Value).Time", Method, 21, ""}, + {"(Value).Uint64", Method, 21, ""}, + {"Any", Func, 21, "func(key string, value any) Attr"}, + {"AnyValue", Func, 21, "func(v any) Value"}, + {"Attr", Type, 21, ""}, + {"Attr.Key", Field, 21, ""}, + {"Attr.Value", Field, 21, ""}, + {"Bool", Func, 21, "func(key string, v bool) Attr"}, + {"BoolValue", Func, 21, "func(v bool) Value"}, + {"Debug", Func, 21, "func(msg string, args ...any)"}, + {"DebugContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"}, + {"Default", Func, 21, "func() *Logger"}, + {"DiscardHandler", Var, 24, ""}, + {"Duration", Func, 21, "func(key string, v time.Duration) Attr"}, + {"DurationValue", Func, 21, "func(v time.Duration) Value"}, + {"Error", Func, 21, "func(msg string, args ...any)"}, + {"ErrorContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"}, + {"Float64", Func, 21, "func(key string, v float64) Attr"}, + {"Float64Value", Func, 21, "func(v float64) Value"}, + {"Group", Func, 21, "func(key string, args ...any) Attr"}, + {"GroupValue", Func, 21, "func(as ...Attr) Value"}, + {"Handler", Type, 21, ""}, + {"HandlerOptions", Type, 21, ""}, + {"HandlerOptions.AddSource", Field, 21, ""}, + {"HandlerOptions.Level", Field, 21, ""}, + {"HandlerOptions.ReplaceAttr", Field, 21, ""}, + {"Info", Func, 21, "func(msg string, args ...any)"}, + {"InfoContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"}, + {"Int", Func, 21, "func(key string, value int) Attr"}, + {"Int64", Func, 21, "func(key string, value int64) Attr"}, + {"Int64Value", Func, 21, "func(v int64) Value"}, + {"IntValue", Func, 21, "func(v int) Value"}, + {"JSONHandler", Type, 21, ""}, + {"Kind", Type, 21, ""}, + {"KindAny", Const, 21, ""}, + {"KindBool", Const, 21, ""}, + {"KindDuration", Const, 21, ""}, + {"KindFloat64", Const, 21, ""}, + {"KindGroup", Const, 21, ""}, + {"KindInt64", Const, 21, ""}, + {"KindLogValuer", Const, 21, ""}, + {"KindString", Const, 21, ""}, + {"KindTime", Const, 21, ""}, + {"KindUint64", Const, 21, ""}, + {"Level", Type, 21, ""}, + {"LevelDebug", Const, 21, ""}, + {"LevelError", Const, 21, ""}, + {"LevelInfo", Const, 21, ""}, + {"LevelKey", Const, 21, ""}, + {"LevelVar", Type, 21, ""}, + {"LevelWarn", Const, 21, ""}, + {"Leveler", Type, 21, ""}, + {"Log", Func, 21, "func(ctx context.Context, level Level, msg string, args ...any)"}, + {"LogAttrs", Func, 21, "func(ctx context.Context, level Level, msg string, attrs ...Attr)"}, + {"LogValuer", Type, 21, ""}, + {"Logger", Type, 21, ""}, 
+ {"MessageKey", Const, 21, ""}, + {"New", Func, 21, "func(h Handler) *Logger"}, + {"NewJSONHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *JSONHandler"}, + {"NewLogLogger", Func, 21, "func(h Handler, level Level) *log.Logger"}, + {"NewRecord", Func, 21, "func(t time.Time, level Level, msg string, pc uintptr) Record"}, + {"NewTextHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *TextHandler"}, + {"Record", Type, 21, ""}, + {"Record.Level", Field, 21, ""}, + {"Record.Message", Field, 21, ""}, + {"Record.PC", Field, 21, ""}, + {"Record.Time", Field, 21, ""}, + {"SetDefault", Func, 21, "func(l *Logger)"}, + {"SetLogLoggerLevel", Func, 22, "func(level Level) (oldLevel Level)"}, + {"Source", Type, 21, ""}, + {"Source.File", Field, 21, ""}, + {"Source.Function", Field, 21, ""}, + {"Source.Line", Field, 21, ""}, + {"SourceKey", Const, 21, ""}, + {"String", Func, 21, "func(key string, value string) Attr"}, + {"StringValue", Func, 21, "func(value string) Value"}, + {"TextHandler", Type, 21, ""}, + {"Time", Func, 21, "func(key string, v time.Time) Attr"}, + {"TimeKey", Const, 21, ""}, + {"TimeValue", Func, 21, "func(v time.Time) Value"}, + {"Uint64", Func, 21, "func(key string, v uint64) Attr"}, + {"Uint64Value", Func, 21, "func(v uint64) Value"}, + {"Value", Type, 21, ""}, + {"Warn", Func, 21, "func(msg string, args ...any)"}, + {"WarnContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"}, + {"With", Func, 21, "func(args ...any) *Logger"}, + }, + "log/syslog": { + {"(*Writer).Alert", Method, 0, ""}, + {"(*Writer).Close", Method, 0, ""}, + {"(*Writer).Crit", Method, 0, ""}, + {"(*Writer).Debug", Method, 0, ""}, + {"(*Writer).Emerg", Method, 0, ""}, + {"(*Writer).Err", Method, 0, ""}, + {"(*Writer).Info", Method, 0, ""}, + {"(*Writer).Notice", Method, 0, ""}, + {"(*Writer).Warning", Method, 0, ""}, + {"(*Writer).Write", Method, 0, ""}, + {"Dial", Func, 0, "func(network string, raddr string, priority Priority, tag string) (*Writer, error)"}, + {"LOG_ALERT", Const, 0, ""}, + {"LOG_AUTH", Const, 1, ""}, + {"LOG_AUTHPRIV", Const, 1, ""}, + {"LOG_CRIT", Const, 0, ""}, + {"LOG_CRON", Const, 1, ""}, + {"LOG_DAEMON", Const, 1, ""}, + {"LOG_DEBUG", Const, 0, ""}, + {"LOG_EMERG", Const, 0, ""}, + {"LOG_ERR", Const, 0, ""}, + {"LOG_FTP", Const, 1, ""}, + {"LOG_INFO", Const, 0, ""}, + {"LOG_KERN", Const, 1, ""}, + {"LOG_LOCAL0", Const, 1, ""}, + {"LOG_LOCAL1", Const, 1, ""}, + {"LOG_LOCAL2", Const, 1, ""}, + {"LOG_LOCAL3", Const, 1, ""}, + {"LOG_LOCAL4", Const, 1, ""}, + {"LOG_LOCAL5", Const, 1, ""}, + {"LOG_LOCAL6", Const, 1, ""}, + {"LOG_LOCAL7", Const, 1, ""}, + {"LOG_LPR", Const, 1, ""}, + {"LOG_MAIL", Const, 1, ""}, + {"LOG_NEWS", Const, 1, ""}, + {"LOG_NOTICE", Const, 0, ""}, + {"LOG_SYSLOG", Const, 1, ""}, + {"LOG_USER", Const, 1, ""}, + {"LOG_UUCP", Const, 1, ""}, + {"LOG_WARNING", Const, 0, ""}, + {"New", Func, 0, "func(priority Priority, tag string) (*Writer, error)"}, + {"NewLogger", Func, 0, "func(p Priority, logFlag int) (*log.Logger, error)"}, + {"Priority", Type, 0, ""}, + {"Writer", Type, 0, ""}, + }, + "maps": { + {"All", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq2[K, V]"}, + {"Clone", Func, 21, "func[M ~map[K]V, K comparable, V any](m M) M"}, + {"Collect", Func, 23, "func[K comparable, V any](seq iter.Seq2[K, V]) map[K]V"}, + {"Copy", Func, 21, "func[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2)"}, + {"DeleteFunc", Func, 21, "func[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool)"}, + {"Equal", Func, 
21, "func[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool"}, + {"EqualFunc", Func, 21, "func[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool"}, + {"Insert", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map, seq iter.Seq2[K, V])"}, + {"Keys", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[K]"}, + {"Values", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[V]"}, + }, + "math": { + {"Abs", Func, 0, "func(x float64) float64"}, + {"Acos", Func, 0, "func(x float64) float64"}, + {"Acosh", Func, 0, "func(x float64) float64"}, + {"Asin", Func, 0, "func(x float64) float64"}, + {"Asinh", Func, 0, "func(x float64) float64"}, + {"Atan", Func, 0, "func(x float64) float64"}, + {"Atan2", Func, 0, "func(y float64, x float64) float64"}, + {"Atanh", Func, 0, "func(x float64) float64"}, + {"Cbrt", Func, 0, "func(x float64) float64"}, + {"Ceil", Func, 0, "func(x float64) float64"}, + {"Copysign", Func, 0, "func(f float64, sign float64) float64"}, + {"Cos", Func, 0, "func(x float64) float64"}, + {"Cosh", Func, 0, "func(x float64) float64"}, + {"Dim", Func, 0, "func(x float64, y float64) float64"}, + {"E", Const, 0, ""}, + {"Erf", Func, 0, "func(x float64) float64"}, + {"Erfc", Func, 0, "func(x float64) float64"}, + {"Erfcinv", Func, 10, "func(x float64) float64"}, + {"Erfinv", Func, 10, "func(x float64) float64"}, + {"Exp", Func, 0, "func(x float64) float64"}, + {"Exp2", Func, 0, "func(x float64) float64"}, + {"Expm1", Func, 0, "func(x float64) float64"}, + {"FMA", Func, 14, "func(x float64, y float64, z float64) float64"}, + {"Float32bits", Func, 0, "func(f float32) uint32"}, + {"Float32frombits", Func, 0, "func(b uint32) float32"}, + {"Float64bits", Func, 0, "func(f float64) uint64"}, + {"Float64frombits", Func, 0, "func(b uint64) float64"}, + {"Floor", Func, 0, "func(x float64) float64"}, + {"Frexp", Func, 0, "func(f float64) (frac float64, exp int)"}, + {"Gamma", Func, 0, "func(x float64) float64"}, + {"Hypot", Func, 0, "func(p float64, q float64) float64"}, + {"Ilogb", Func, 0, "func(x float64) int"}, + {"Inf", Func, 0, "func(sign int) float64"}, + {"IsInf", Func, 0, "func(f float64, sign int) bool"}, + {"IsNaN", Func, 0, "func(f float64) (is bool)"}, + {"J0", Func, 0, "func(x float64) float64"}, + {"J1", Func, 0, "func(x float64) float64"}, + {"Jn", Func, 0, "func(n int, x float64) float64"}, + {"Ldexp", Func, 0, "func(frac float64, exp int) float64"}, + {"Lgamma", Func, 0, "func(x float64) (lgamma float64, sign int)"}, + {"Ln10", Const, 0, ""}, + {"Ln2", Const, 0, ""}, + {"Log", Func, 0, "func(x float64) float64"}, + {"Log10", Func, 0, "func(x float64) float64"}, + {"Log10E", Const, 0, ""}, + {"Log1p", Func, 0, "func(x float64) float64"}, + {"Log2", Func, 0, "func(x float64) float64"}, + {"Log2E", Const, 0, ""}, + {"Logb", Func, 0, "func(x float64) float64"}, + {"Max", Func, 0, "func(x float64, y float64) float64"}, + {"MaxFloat32", Const, 0, ""}, + {"MaxFloat64", Const, 0, ""}, + {"MaxInt", Const, 17, ""}, + {"MaxInt16", Const, 0, ""}, + {"MaxInt32", Const, 0, ""}, + {"MaxInt64", Const, 0, ""}, + {"MaxInt8", Const, 0, ""}, + {"MaxUint", Const, 17, ""}, + {"MaxUint16", Const, 0, ""}, + {"MaxUint32", Const, 0, ""}, + {"MaxUint64", Const, 0, ""}, + {"MaxUint8", Const, 0, ""}, + {"Min", Func, 0, "func(x float64, y float64) float64"}, + {"MinInt", Const, 17, ""}, + {"MinInt16", Const, 0, ""}, + {"MinInt32", Const, 0, ""}, + {"MinInt64", Const, 0, ""}, + {"MinInt8", Const, 0, ""}, + {"Mod", Func, 0, "func(x 
float64, y float64) float64"}, + {"Modf", Func, 0, "func(f float64) (int float64, frac float64)"}, + {"NaN", Func, 0, "func() float64"}, + {"Nextafter", Func, 0, "func(x float64, y float64) (r float64)"}, + {"Nextafter32", Func, 4, "func(x float32, y float32) (r float32)"}, + {"Phi", Const, 0, ""}, + {"Pi", Const, 0, ""}, + {"Pow", Func, 0, "func(x float64, y float64) float64"}, + {"Pow10", Func, 0, "func(n int) float64"}, + {"Remainder", Func, 0, "func(x float64, y float64) float64"}, + {"Round", Func, 10, "func(x float64) float64"}, + {"RoundToEven", Func, 10, "func(x float64) float64"}, + {"Signbit", Func, 0, "func(x float64) bool"}, + {"Sin", Func, 0, "func(x float64) float64"}, + {"Sincos", Func, 0, "func(x float64) (sin float64, cos float64)"}, + {"Sinh", Func, 0, "func(x float64) float64"}, + {"SmallestNonzeroFloat32", Const, 0, ""}, + {"SmallestNonzeroFloat64", Const, 0, ""}, + {"Sqrt", Func, 0, "func(x float64) float64"}, + {"Sqrt2", Const, 0, ""}, + {"SqrtE", Const, 0, ""}, + {"SqrtPhi", Const, 0, ""}, + {"SqrtPi", Const, 0, ""}, + {"Tan", Func, 0, "func(x float64) float64"}, + {"Tanh", Func, 0, "func(x float64) float64"}, + {"Trunc", Func, 0, "func(x float64) float64"}, + {"Y0", Func, 0, "func(x float64) float64"}, + {"Y1", Func, 0, "func(x float64) float64"}, + {"Yn", Func, 0, "func(n int, x float64) float64"}, + }, + "math/big": { + {"(*Float).Abs", Method, 5, ""}, + {"(*Float).Acc", Method, 5, ""}, + {"(*Float).Add", Method, 5, ""}, + {"(*Float).Append", Method, 5, ""}, + {"(*Float).AppendText", Method, 24, ""}, + {"(*Float).Cmp", Method, 5, ""}, + {"(*Float).Copy", Method, 5, ""}, + {"(*Float).Float32", Method, 5, ""}, + {"(*Float).Float64", Method, 5, ""}, + {"(*Float).Format", Method, 5, ""}, + {"(*Float).GobDecode", Method, 7, ""}, + {"(*Float).GobEncode", Method, 7, ""}, + {"(*Float).Int", Method, 5, ""}, + {"(*Float).Int64", Method, 5, ""}, + {"(*Float).IsInf", Method, 5, ""}, + {"(*Float).IsInt", Method, 5, ""}, + {"(*Float).MantExp", Method, 5, ""}, + {"(*Float).MarshalText", Method, 6, ""}, + {"(*Float).MinPrec", Method, 5, ""}, + {"(*Float).Mode", Method, 5, ""}, + {"(*Float).Mul", Method, 5, ""}, + {"(*Float).Neg", Method, 5, ""}, + {"(*Float).Parse", Method, 5, ""}, + {"(*Float).Prec", Method, 5, ""}, + {"(*Float).Quo", Method, 5, ""}, + {"(*Float).Rat", Method, 5, ""}, + {"(*Float).Scan", Method, 8, ""}, + {"(*Float).Set", Method, 5, ""}, + {"(*Float).SetFloat64", Method, 5, ""}, + {"(*Float).SetInf", Method, 5, ""}, + {"(*Float).SetInt", Method, 5, ""}, + {"(*Float).SetInt64", Method, 5, ""}, + {"(*Float).SetMantExp", Method, 5, ""}, + {"(*Float).SetMode", Method, 5, ""}, + {"(*Float).SetPrec", Method, 5, ""}, + {"(*Float).SetRat", Method, 5, ""}, + {"(*Float).SetString", Method, 5, ""}, + {"(*Float).SetUint64", Method, 5, ""}, + {"(*Float).Sign", Method, 5, ""}, + {"(*Float).Signbit", Method, 5, ""}, + {"(*Float).Sqrt", Method, 10, ""}, + {"(*Float).String", Method, 5, ""}, + {"(*Float).Sub", Method, 5, ""}, + {"(*Float).Text", Method, 5, ""}, + {"(*Float).Uint64", Method, 5, ""}, + {"(*Float).UnmarshalText", Method, 6, ""}, + {"(*Int).Abs", Method, 0, ""}, + {"(*Int).Add", Method, 0, ""}, + {"(*Int).And", Method, 0, ""}, + {"(*Int).AndNot", Method, 0, ""}, + {"(*Int).Append", Method, 6, ""}, + {"(*Int).AppendText", Method, 24, ""}, + {"(*Int).Binomial", Method, 0, ""}, + {"(*Int).Bit", Method, 0, ""}, + {"(*Int).BitLen", Method, 0, ""}, + {"(*Int).Bits", Method, 0, ""}, + {"(*Int).Bytes", Method, 0, ""}, + {"(*Int).Cmp", Method, 0, ""}, + {"(*Int).CmpAbs", 
Method, 10, ""}, + {"(*Int).Div", Method, 0, ""}, + {"(*Int).DivMod", Method, 0, ""}, + {"(*Int).Exp", Method, 0, ""}, + {"(*Int).FillBytes", Method, 15, ""}, + {"(*Int).Float64", Method, 21, ""}, + {"(*Int).Format", Method, 0, ""}, + {"(*Int).GCD", Method, 0, ""}, + {"(*Int).GobDecode", Method, 0, ""}, + {"(*Int).GobEncode", Method, 0, ""}, + {"(*Int).Int64", Method, 0, ""}, + {"(*Int).IsInt64", Method, 9, ""}, + {"(*Int).IsUint64", Method, 9, ""}, + {"(*Int).Lsh", Method, 0, ""}, + {"(*Int).MarshalJSON", Method, 1, ""}, + {"(*Int).MarshalText", Method, 3, ""}, + {"(*Int).Mod", Method, 0, ""}, + {"(*Int).ModInverse", Method, 0, ""}, + {"(*Int).ModSqrt", Method, 5, ""}, + {"(*Int).Mul", Method, 0, ""}, + {"(*Int).MulRange", Method, 0, ""}, + {"(*Int).Neg", Method, 0, ""}, + {"(*Int).Not", Method, 0, ""}, + {"(*Int).Or", Method, 0, ""}, + {"(*Int).ProbablyPrime", Method, 0, ""}, + {"(*Int).Quo", Method, 0, ""}, + {"(*Int).QuoRem", Method, 0, ""}, + {"(*Int).Rand", Method, 0, ""}, + {"(*Int).Rem", Method, 0, ""}, + {"(*Int).Rsh", Method, 0, ""}, + {"(*Int).Scan", Method, 0, ""}, + {"(*Int).Set", Method, 0, ""}, + {"(*Int).SetBit", Method, 0, ""}, + {"(*Int).SetBits", Method, 0, ""}, + {"(*Int).SetBytes", Method, 0, ""}, + {"(*Int).SetInt64", Method, 0, ""}, + {"(*Int).SetString", Method, 0, ""}, + {"(*Int).SetUint64", Method, 1, ""}, + {"(*Int).Sign", Method, 0, ""}, + {"(*Int).Sqrt", Method, 8, ""}, + {"(*Int).String", Method, 0, ""}, + {"(*Int).Sub", Method, 0, ""}, + {"(*Int).Text", Method, 6, ""}, + {"(*Int).TrailingZeroBits", Method, 13, ""}, + {"(*Int).Uint64", Method, 1, ""}, + {"(*Int).UnmarshalJSON", Method, 1, ""}, + {"(*Int).UnmarshalText", Method, 3, ""}, + {"(*Int).Xor", Method, 0, ""}, + {"(*Rat).Abs", Method, 0, ""}, + {"(*Rat).Add", Method, 0, ""}, + {"(*Rat).AppendText", Method, 24, ""}, + {"(*Rat).Cmp", Method, 0, ""}, + {"(*Rat).Denom", Method, 0, ""}, + {"(*Rat).Float32", Method, 4, ""}, + {"(*Rat).Float64", Method, 1, ""}, + {"(*Rat).FloatPrec", Method, 22, ""}, + {"(*Rat).FloatString", Method, 0, ""}, + {"(*Rat).GobDecode", Method, 0, ""}, + {"(*Rat).GobEncode", Method, 0, ""}, + {"(*Rat).Inv", Method, 0, ""}, + {"(*Rat).IsInt", Method, 0, ""}, + {"(*Rat).MarshalText", Method, 3, ""}, + {"(*Rat).Mul", Method, 0, ""}, + {"(*Rat).Neg", Method, 0, ""}, + {"(*Rat).Num", Method, 0, ""}, + {"(*Rat).Quo", Method, 0, ""}, + {"(*Rat).RatString", Method, 0, ""}, + {"(*Rat).Scan", Method, 0, ""}, + {"(*Rat).Set", Method, 0, ""}, + {"(*Rat).SetFloat64", Method, 1, ""}, + {"(*Rat).SetFrac", Method, 0, ""}, + {"(*Rat).SetFrac64", Method, 0, ""}, + {"(*Rat).SetInt", Method, 0, ""}, + {"(*Rat).SetInt64", Method, 0, ""}, + {"(*Rat).SetString", Method, 0, ""}, + {"(*Rat).SetUint64", Method, 13, ""}, + {"(*Rat).Sign", Method, 0, ""}, + {"(*Rat).String", Method, 0, ""}, + {"(*Rat).Sub", Method, 0, ""}, + {"(*Rat).UnmarshalText", Method, 3, ""}, + {"(Accuracy).String", Method, 5, ""}, + {"(ErrNaN).Error", Method, 5, ""}, + {"(RoundingMode).String", Method, 5, ""}, + {"Above", Const, 5, ""}, + {"Accuracy", Type, 5, ""}, + {"AwayFromZero", Const, 5, ""}, + {"Below", Const, 5, ""}, + {"ErrNaN", Type, 5, ""}, + {"Exact", Const, 5, ""}, + {"Float", Type, 5, ""}, + {"Int", Type, 0, ""}, + {"Jacobi", Func, 5, "func(x *Int, y *Int) int"}, + {"MaxBase", Const, 0, ""}, + {"MaxExp", Const, 5, ""}, + {"MaxPrec", Const, 5, ""}, + {"MinExp", Const, 5, ""}, + {"NewFloat", Func, 5, "func(x float64) *Float"}, + {"NewInt", Func, 0, "func(x int64) *Int"}, + {"NewRat", Func, 0, "func(a int64, b int64) *Rat"}, 
+ {"ParseFloat", Func, 5, "func(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error)"}, + {"Rat", Type, 0, ""}, + {"RoundingMode", Type, 5, ""}, + {"ToNearestAway", Const, 5, ""}, + {"ToNearestEven", Const, 5, ""}, + {"ToNegativeInf", Const, 5, ""}, + {"ToPositiveInf", Const, 5, ""}, + {"ToZero", Const, 5, ""}, + {"Word", Type, 0, ""}, + }, + "math/bits": { + {"Add", Func, 12, "func(x uint, y uint, carry uint) (sum uint, carryOut uint)"}, + {"Add32", Func, 12, "func(x uint32, y uint32, carry uint32) (sum uint32, carryOut uint32)"}, + {"Add64", Func, 12, "func(x uint64, y uint64, carry uint64) (sum uint64, carryOut uint64)"}, + {"Div", Func, 12, "func(hi uint, lo uint, y uint) (quo uint, rem uint)"}, + {"Div32", Func, 12, "func(hi uint32, lo uint32, y uint32) (quo uint32, rem uint32)"}, + {"Div64", Func, 12, "func(hi uint64, lo uint64, y uint64) (quo uint64, rem uint64)"}, + {"LeadingZeros", Func, 9, "func(x uint) int"}, + {"LeadingZeros16", Func, 9, "func(x uint16) int"}, + {"LeadingZeros32", Func, 9, "func(x uint32) int"}, + {"LeadingZeros64", Func, 9, "func(x uint64) int"}, + {"LeadingZeros8", Func, 9, "func(x uint8) int"}, + {"Len", Func, 9, "func(x uint) int"}, + {"Len16", Func, 9, "func(x uint16) (n int)"}, + {"Len32", Func, 9, "func(x uint32) (n int)"}, + {"Len64", Func, 9, "func(x uint64) (n int)"}, + {"Len8", Func, 9, "func(x uint8) int"}, + {"Mul", Func, 12, "func(x uint, y uint) (hi uint, lo uint)"}, + {"Mul32", Func, 12, "func(x uint32, y uint32) (hi uint32, lo uint32)"}, + {"Mul64", Func, 12, "func(x uint64, y uint64) (hi uint64, lo uint64)"}, + {"OnesCount", Func, 9, "func(x uint) int"}, + {"OnesCount16", Func, 9, "func(x uint16) int"}, + {"OnesCount32", Func, 9, "func(x uint32) int"}, + {"OnesCount64", Func, 9, "func(x uint64) int"}, + {"OnesCount8", Func, 9, "func(x uint8) int"}, + {"Rem", Func, 14, "func(hi uint, lo uint, y uint) uint"}, + {"Rem32", Func, 14, "func(hi uint32, lo uint32, y uint32) uint32"}, + {"Rem64", Func, 14, "func(hi uint64, lo uint64, y uint64) uint64"}, + {"Reverse", Func, 9, "func(x uint) uint"}, + {"Reverse16", Func, 9, "func(x uint16) uint16"}, + {"Reverse32", Func, 9, "func(x uint32) uint32"}, + {"Reverse64", Func, 9, "func(x uint64) uint64"}, + {"Reverse8", Func, 9, "func(x uint8) uint8"}, + {"ReverseBytes", Func, 9, "func(x uint) uint"}, + {"ReverseBytes16", Func, 9, "func(x uint16) uint16"}, + {"ReverseBytes32", Func, 9, "func(x uint32) uint32"}, + {"ReverseBytes64", Func, 9, "func(x uint64) uint64"}, + {"RotateLeft", Func, 9, "func(x uint, k int) uint"}, + {"RotateLeft16", Func, 9, "func(x uint16, k int) uint16"}, + {"RotateLeft32", Func, 9, "func(x uint32, k int) uint32"}, + {"RotateLeft64", Func, 9, "func(x uint64, k int) uint64"}, + {"RotateLeft8", Func, 9, "func(x uint8, k int) uint8"}, + {"Sub", Func, 12, "func(x uint, y uint, borrow uint) (diff uint, borrowOut uint)"}, + {"Sub32", Func, 12, "func(x uint32, y uint32, borrow uint32) (diff uint32, borrowOut uint32)"}, + {"Sub64", Func, 12, "func(x uint64, y uint64, borrow uint64) (diff uint64, borrowOut uint64)"}, + {"TrailingZeros", Func, 9, "func(x uint) int"}, + {"TrailingZeros16", Func, 9, "func(x uint16) int"}, + {"TrailingZeros32", Func, 9, "func(x uint32) int"}, + {"TrailingZeros64", Func, 9, "func(x uint64) int"}, + {"TrailingZeros8", Func, 9, "func(x uint8) int"}, + {"UintSize", Const, 9, ""}, + }, + "math/cmplx": { + {"Abs", Func, 0, "func(x complex128) float64"}, + {"Acos", Func, 0, "func(x complex128) complex128"}, + {"Acosh", Func, 0, "func(x 
complex128) complex128"}, + {"Asin", Func, 0, "func(x complex128) complex128"}, + {"Asinh", Func, 0, "func(x complex128) complex128"}, + {"Atan", Func, 0, "func(x complex128) complex128"}, + {"Atanh", Func, 0, "func(x complex128) complex128"}, + {"Conj", Func, 0, "func(x complex128) complex128"}, + {"Cos", Func, 0, "func(x complex128) complex128"}, + {"Cosh", Func, 0, "func(x complex128) complex128"}, + {"Cot", Func, 0, "func(x complex128) complex128"}, + {"Exp", Func, 0, "func(x complex128) complex128"}, + {"Inf", Func, 0, "func() complex128"}, + {"IsInf", Func, 0, "func(x complex128) bool"}, + {"IsNaN", Func, 0, "func(x complex128) bool"}, + {"Log", Func, 0, "func(x complex128) complex128"}, + {"Log10", Func, 0, "func(x complex128) complex128"}, + {"NaN", Func, 0, "func() complex128"}, + {"Phase", Func, 0, "func(x complex128) float64"}, + {"Polar", Func, 0, "func(x complex128) (r float64, θ float64)"}, + {"Pow", Func, 0, "func(x complex128, y complex128) complex128"}, + {"Rect", Func, 0, "func(r float64, θ float64) complex128"}, + {"Sin", Func, 0, "func(x complex128) complex128"}, + {"Sinh", Func, 0, "func(x complex128) complex128"}, + {"Sqrt", Func, 0, "func(x complex128) complex128"}, + {"Tan", Func, 0, "func(x complex128) complex128"}, + {"Tanh", Func, 0, "func(x complex128) complex128"}, + }, + "math/rand": { + {"(*Rand).ExpFloat64", Method, 0, ""}, + {"(*Rand).Float32", Method, 0, ""}, + {"(*Rand).Float64", Method, 0, ""}, + {"(*Rand).Int", Method, 0, ""}, + {"(*Rand).Int31", Method, 0, ""}, + {"(*Rand).Int31n", Method, 0, ""}, + {"(*Rand).Int63", Method, 0, ""}, + {"(*Rand).Int63n", Method, 0, ""}, + {"(*Rand).Intn", Method, 0, ""}, + {"(*Rand).NormFloat64", Method, 0, ""}, + {"(*Rand).Perm", Method, 0, ""}, + {"(*Rand).Read", Method, 6, ""}, + {"(*Rand).Seed", Method, 0, ""}, + {"(*Rand).Shuffle", Method, 10, ""}, + {"(*Rand).Uint32", Method, 0, ""}, + {"(*Rand).Uint64", Method, 8, ""}, + {"(*Zipf).Uint64", Method, 0, ""}, + {"ExpFloat64", Func, 0, "func() float64"}, + {"Float32", Func, 0, "func() float32"}, + {"Float64", Func, 0, "func() float64"}, + {"Int", Func, 0, "func() int"}, + {"Int31", Func, 0, "func() int32"}, + {"Int31n", Func, 0, "func(n int32) int32"}, + {"Int63", Func, 0, "func() int64"}, + {"Int63n", Func, 0, "func(n int64) int64"}, + {"Intn", Func, 0, "func(n int) int"}, + {"New", Func, 0, "func(src Source) *Rand"}, + {"NewSource", Func, 0, "func(seed int64) Source"}, + {"NewZipf", Func, 0, "func(r *Rand, s float64, v float64, imax uint64) *Zipf"}, + {"NormFloat64", Func, 0, "func() float64"}, + {"Perm", Func, 0, "func(n int) []int"}, + {"Rand", Type, 0, ""}, + {"Read", Func, 6, "func(p []byte) (n int, err error)"}, + {"Seed", Func, 0, "func(seed int64)"}, + {"Shuffle", Func, 10, "func(n int, swap func(i int, j int))"}, + {"Source", Type, 0, ""}, + {"Source64", Type, 8, ""}, + {"Uint32", Func, 0, "func() uint32"}, + {"Uint64", Func, 8, "func() uint64"}, + {"Zipf", Type, 0, ""}, + }, + "math/rand/v2": { + {"(*ChaCha8).AppendBinary", Method, 24, ""}, + {"(*ChaCha8).MarshalBinary", Method, 22, ""}, + {"(*ChaCha8).Read", Method, 23, ""}, + {"(*ChaCha8).Seed", Method, 22, ""}, + {"(*ChaCha8).Uint64", Method, 22, ""}, + {"(*ChaCha8).UnmarshalBinary", Method, 22, ""}, + {"(*PCG).AppendBinary", Method, 24, ""}, + {"(*PCG).MarshalBinary", Method, 22, ""}, + {"(*PCG).Seed", Method, 22, ""}, + {"(*PCG).Uint64", Method, 22, ""}, + {"(*PCG).UnmarshalBinary", Method, 22, ""}, + {"(*Rand).ExpFloat64", Method, 22, ""}, + {"(*Rand).Float32", Method, 22, ""}, + {"(*Rand).Float64", 
Method, 22, ""}, + {"(*Rand).Int", Method, 22, ""}, + {"(*Rand).Int32", Method, 22, ""}, + {"(*Rand).Int32N", Method, 22, ""}, + {"(*Rand).Int64", Method, 22, ""}, + {"(*Rand).Int64N", Method, 22, ""}, + {"(*Rand).IntN", Method, 22, ""}, + {"(*Rand).NormFloat64", Method, 22, ""}, + {"(*Rand).Perm", Method, 22, ""}, + {"(*Rand).Shuffle", Method, 22, ""}, + {"(*Rand).Uint", Method, 23, ""}, + {"(*Rand).Uint32", Method, 22, ""}, + {"(*Rand).Uint32N", Method, 22, ""}, + {"(*Rand).Uint64", Method, 22, ""}, + {"(*Rand).Uint64N", Method, 22, ""}, + {"(*Rand).UintN", Method, 22, ""}, + {"(*Zipf).Uint64", Method, 22, ""}, + {"ChaCha8", Type, 22, ""}, + {"ExpFloat64", Func, 22, "func() float64"}, + {"Float32", Func, 22, "func() float32"}, + {"Float64", Func, 22, "func() float64"}, + {"Int", Func, 22, "func() int"}, + {"Int32", Func, 22, "func() int32"}, + {"Int32N", Func, 22, "func(n int32) int32"}, + {"Int64", Func, 22, "func() int64"}, + {"Int64N", Func, 22, "func(n int64) int64"}, + {"IntN", Func, 22, "func(n int) int"}, + {"N", Func, 22, "func[Int intType](n Int) Int"}, + {"New", Func, 22, "func(src Source) *Rand"}, + {"NewChaCha8", Func, 22, "func(seed [32]byte) *ChaCha8"}, + {"NewPCG", Func, 22, "func(seed1 uint64, seed2 uint64) *PCG"}, + {"NewZipf", Func, 22, "func(r *Rand, s float64, v float64, imax uint64) *Zipf"}, + {"NormFloat64", Func, 22, "func() float64"}, + {"PCG", Type, 22, ""}, + {"Perm", Func, 22, "func(n int) []int"}, + {"Rand", Type, 22, ""}, + {"Shuffle", Func, 22, "func(n int, swap func(i int, j int))"}, + {"Source", Type, 22, ""}, + {"Uint", Func, 23, "func() uint"}, + {"Uint32", Func, 22, "func() uint32"}, + {"Uint32N", Func, 22, "func(n uint32) uint32"}, + {"Uint64", Func, 22, "func() uint64"}, + {"Uint64N", Func, 22, "func(n uint64) uint64"}, + {"UintN", Func, 22, "func(n uint) uint"}, + {"Zipf", Type, 22, ""}, + }, + "mime": { + {"(*WordDecoder).Decode", Method, 5, ""}, + {"(*WordDecoder).DecodeHeader", Method, 5, ""}, + {"(WordEncoder).Encode", Method, 5, ""}, + {"AddExtensionType", Func, 0, "func(ext string, typ string) error"}, + {"BEncoding", Const, 5, ""}, + {"ErrInvalidMediaParameter", Var, 9, ""}, + {"ExtensionsByType", Func, 5, "func(typ string) ([]string, error)"}, + {"FormatMediaType", Func, 0, "func(t string, param map[string]string) string"}, + {"ParseMediaType", Func, 0, "func(v string) (mediatype string, params map[string]string, err error)"}, + {"QEncoding", Const, 5, ""}, + {"TypeByExtension", Func, 0, "func(ext string) string"}, + {"WordDecoder", Type, 5, ""}, + {"WordDecoder.CharsetReader", Field, 5, ""}, + {"WordEncoder", Type, 5, ""}, + }, + "mime/multipart": { + {"(*FileHeader).Open", Method, 0, ""}, + {"(*Form).RemoveAll", Method, 0, ""}, + {"(*Part).Close", Method, 0, ""}, + {"(*Part).FileName", Method, 0, ""}, + {"(*Part).FormName", Method, 0, ""}, + {"(*Part).Read", Method, 0, ""}, + {"(*Reader).NextPart", Method, 0, ""}, + {"(*Reader).NextRawPart", Method, 14, ""}, + {"(*Reader).ReadForm", Method, 0, ""}, + {"(*Writer).Boundary", Method, 0, ""}, + {"(*Writer).Close", Method, 0, ""}, + {"(*Writer).CreateFormField", Method, 0, ""}, + {"(*Writer).CreateFormFile", Method, 0, ""}, + {"(*Writer).CreatePart", Method, 0, ""}, + {"(*Writer).FormDataContentType", Method, 0, ""}, + {"(*Writer).SetBoundary", Method, 1, ""}, + {"(*Writer).WriteField", Method, 0, ""}, + {"ErrMessageTooLarge", Var, 9, ""}, + {"File", Type, 0, ""}, + {"FileContentDisposition", Func, 25, ""}, + {"FileHeader", Type, 0, ""}, + {"FileHeader.Filename", Field, 0, ""}, + 
{"FileHeader.Header", Field, 0, ""}, + {"FileHeader.Size", Field, 9, ""}, + {"Form", Type, 0, ""}, + {"Form.File", Field, 0, ""}, + {"Form.Value", Field, 0, ""}, + {"NewReader", Func, 0, "func(r io.Reader, boundary string) *Reader"}, + {"NewWriter", Func, 0, "func(w io.Writer) *Writer"}, + {"Part", Type, 0, ""}, + {"Part.Header", Field, 0, ""}, + {"Reader", Type, 0, ""}, + {"Writer", Type, 0, ""}, + }, + "mime/quotedprintable": { + {"(*Reader).Read", Method, 5, ""}, + {"(*Writer).Close", Method, 5, ""}, + {"(*Writer).Write", Method, 5, ""}, + {"NewReader", Func, 5, "func(r io.Reader) *Reader"}, + {"NewWriter", Func, 5, "func(w io.Writer) *Writer"}, + {"Reader", Type, 5, ""}, + {"Writer", Type, 5, ""}, + {"Writer.Binary", Field, 5, ""}, + }, + "net": { + {"(*AddrError).Error", Method, 0, ""}, + {"(*AddrError).Temporary", Method, 0, ""}, + {"(*AddrError).Timeout", Method, 0, ""}, + {"(*Buffers).Read", Method, 8, ""}, + {"(*Buffers).WriteTo", Method, 8, ""}, + {"(*DNSConfigError).Error", Method, 0, ""}, + {"(*DNSConfigError).Temporary", Method, 0, ""}, + {"(*DNSConfigError).Timeout", Method, 0, ""}, + {"(*DNSConfigError).Unwrap", Method, 13, ""}, + {"(*DNSError).Error", Method, 0, ""}, + {"(*DNSError).Temporary", Method, 0, ""}, + {"(*DNSError).Timeout", Method, 0, ""}, + {"(*DNSError).Unwrap", Method, 23, ""}, + {"(*Dialer).Dial", Method, 1, ""}, + {"(*Dialer).DialContext", Method, 7, ""}, + {"(*Dialer).MultipathTCP", Method, 21, ""}, + {"(*Dialer).SetMultipathTCP", Method, 21, ""}, + {"(*IP).UnmarshalText", Method, 2, ""}, + {"(*IPAddr).Network", Method, 0, ""}, + {"(*IPAddr).String", Method, 0, ""}, + {"(*IPConn).Close", Method, 0, ""}, + {"(*IPConn).File", Method, 0, ""}, + {"(*IPConn).LocalAddr", Method, 0, ""}, + {"(*IPConn).Read", Method, 0, ""}, + {"(*IPConn).ReadFrom", Method, 0, ""}, + {"(*IPConn).ReadFromIP", Method, 0, ""}, + {"(*IPConn).ReadMsgIP", Method, 1, ""}, + {"(*IPConn).RemoteAddr", Method, 0, ""}, + {"(*IPConn).SetDeadline", Method, 0, ""}, + {"(*IPConn).SetReadBuffer", Method, 0, ""}, + {"(*IPConn).SetReadDeadline", Method, 0, ""}, + {"(*IPConn).SetWriteBuffer", Method, 0, ""}, + {"(*IPConn).SetWriteDeadline", Method, 0, ""}, + {"(*IPConn).SyscallConn", Method, 9, ""}, + {"(*IPConn).Write", Method, 0, ""}, + {"(*IPConn).WriteMsgIP", Method, 1, ""}, + {"(*IPConn).WriteTo", Method, 0, ""}, + {"(*IPConn).WriteToIP", Method, 0, ""}, + {"(*IPNet).Contains", Method, 0, ""}, + {"(*IPNet).Network", Method, 0, ""}, + {"(*IPNet).String", Method, 0, ""}, + {"(*Interface).Addrs", Method, 0, ""}, + {"(*Interface).MulticastAddrs", Method, 0, ""}, + {"(*ListenConfig).Listen", Method, 11, ""}, + {"(*ListenConfig).ListenPacket", Method, 11, ""}, + {"(*ListenConfig).MultipathTCP", Method, 21, ""}, + {"(*ListenConfig).SetMultipathTCP", Method, 21, ""}, + {"(*OpError).Error", Method, 0, ""}, + {"(*OpError).Temporary", Method, 0, ""}, + {"(*OpError).Timeout", Method, 0, ""}, + {"(*OpError).Unwrap", Method, 13, ""}, + {"(*ParseError).Error", Method, 0, ""}, + {"(*ParseError).Temporary", Method, 17, ""}, + {"(*ParseError).Timeout", Method, 17, ""}, + {"(*Resolver).LookupAddr", Method, 8, ""}, + {"(*Resolver).LookupCNAME", Method, 8, ""}, + {"(*Resolver).LookupHost", Method, 8, ""}, + {"(*Resolver).LookupIP", Method, 15, ""}, + {"(*Resolver).LookupIPAddr", Method, 8, ""}, + {"(*Resolver).LookupMX", Method, 8, ""}, + {"(*Resolver).LookupNS", Method, 8, ""}, + {"(*Resolver).LookupNetIP", Method, 18, ""}, + {"(*Resolver).LookupPort", Method, 8, ""}, + {"(*Resolver).LookupSRV", Method, 8, ""}, + 
{"(*Resolver).LookupTXT", Method, 8, ""}, + {"(*TCPAddr).AddrPort", Method, 18, ""}, + {"(*TCPAddr).Network", Method, 0, ""}, + {"(*TCPAddr).String", Method, 0, ""}, + {"(*TCPConn).Close", Method, 0, ""}, + {"(*TCPConn).CloseRead", Method, 0, ""}, + {"(*TCPConn).CloseWrite", Method, 0, ""}, + {"(*TCPConn).File", Method, 0, ""}, + {"(*TCPConn).LocalAddr", Method, 0, ""}, + {"(*TCPConn).MultipathTCP", Method, 21, ""}, + {"(*TCPConn).Read", Method, 0, ""}, + {"(*TCPConn).ReadFrom", Method, 0, ""}, + {"(*TCPConn).RemoteAddr", Method, 0, ""}, + {"(*TCPConn).SetDeadline", Method, 0, ""}, + {"(*TCPConn).SetKeepAlive", Method, 0, ""}, + {"(*TCPConn).SetKeepAliveConfig", Method, 23, ""}, + {"(*TCPConn).SetKeepAlivePeriod", Method, 2, ""}, + {"(*TCPConn).SetLinger", Method, 0, ""}, + {"(*TCPConn).SetNoDelay", Method, 0, ""}, + {"(*TCPConn).SetReadBuffer", Method, 0, ""}, + {"(*TCPConn).SetReadDeadline", Method, 0, ""}, + {"(*TCPConn).SetWriteBuffer", Method, 0, ""}, + {"(*TCPConn).SetWriteDeadline", Method, 0, ""}, + {"(*TCPConn).SyscallConn", Method, 9, ""}, + {"(*TCPConn).Write", Method, 0, ""}, + {"(*TCPConn).WriteTo", Method, 22, ""}, + {"(*TCPListener).Accept", Method, 0, ""}, + {"(*TCPListener).AcceptTCP", Method, 0, ""}, + {"(*TCPListener).Addr", Method, 0, ""}, + {"(*TCPListener).Close", Method, 0, ""}, + {"(*TCPListener).File", Method, 0, ""}, + {"(*TCPListener).SetDeadline", Method, 0, ""}, + {"(*TCPListener).SyscallConn", Method, 10, ""}, + {"(*UDPAddr).AddrPort", Method, 18, ""}, + {"(*UDPAddr).Network", Method, 0, ""}, + {"(*UDPAddr).String", Method, 0, ""}, + {"(*UDPConn).Close", Method, 0, ""}, + {"(*UDPConn).File", Method, 0, ""}, + {"(*UDPConn).LocalAddr", Method, 0, ""}, + {"(*UDPConn).Read", Method, 0, ""}, + {"(*UDPConn).ReadFrom", Method, 0, ""}, + {"(*UDPConn).ReadFromUDP", Method, 0, ""}, + {"(*UDPConn).ReadFromUDPAddrPort", Method, 18, ""}, + {"(*UDPConn).ReadMsgUDP", Method, 1, ""}, + {"(*UDPConn).ReadMsgUDPAddrPort", Method, 18, ""}, + {"(*UDPConn).RemoteAddr", Method, 0, ""}, + {"(*UDPConn).SetDeadline", Method, 0, ""}, + {"(*UDPConn).SetReadBuffer", Method, 0, ""}, + {"(*UDPConn).SetReadDeadline", Method, 0, ""}, + {"(*UDPConn).SetWriteBuffer", Method, 0, ""}, + {"(*UDPConn).SetWriteDeadline", Method, 0, ""}, + {"(*UDPConn).SyscallConn", Method, 9, ""}, + {"(*UDPConn).Write", Method, 0, ""}, + {"(*UDPConn).WriteMsgUDP", Method, 1, ""}, + {"(*UDPConn).WriteMsgUDPAddrPort", Method, 18, ""}, + {"(*UDPConn).WriteTo", Method, 0, ""}, + {"(*UDPConn).WriteToUDP", Method, 0, ""}, + {"(*UDPConn).WriteToUDPAddrPort", Method, 18, ""}, + {"(*UnixAddr).Network", Method, 0, ""}, + {"(*UnixAddr).String", Method, 0, ""}, + {"(*UnixConn).Close", Method, 0, ""}, + {"(*UnixConn).CloseRead", Method, 1, ""}, + {"(*UnixConn).CloseWrite", Method, 1, ""}, + {"(*UnixConn).File", Method, 0, ""}, + {"(*UnixConn).LocalAddr", Method, 0, ""}, + {"(*UnixConn).Read", Method, 0, ""}, + {"(*UnixConn).ReadFrom", Method, 0, ""}, + {"(*UnixConn).ReadFromUnix", Method, 0, ""}, + {"(*UnixConn).ReadMsgUnix", Method, 0, ""}, + {"(*UnixConn).RemoteAddr", Method, 0, ""}, + {"(*UnixConn).SetDeadline", Method, 0, ""}, + {"(*UnixConn).SetReadBuffer", Method, 0, ""}, + {"(*UnixConn).SetReadDeadline", Method, 0, ""}, + {"(*UnixConn).SetWriteBuffer", Method, 0, ""}, + {"(*UnixConn).SetWriteDeadline", Method, 0, ""}, + {"(*UnixConn).SyscallConn", Method, 9, ""}, + {"(*UnixConn).Write", Method, 0, ""}, + {"(*UnixConn).WriteMsgUnix", Method, 0, ""}, + {"(*UnixConn).WriteTo", Method, 0, ""}, + {"(*UnixConn).WriteToUnix", 
Method, 0, ""}, + {"(*UnixListener).Accept", Method, 0, ""}, + {"(*UnixListener).AcceptUnix", Method, 0, ""}, + {"(*UnixListener).Addr", Method, 0, ""}, + {"(*UnixListener).Close", Method, 0, ""}, + {"(*UnixListener).File", Method, 0, ""}, + {"(*UnixListener).SetDeadline", Method, 0, ""}, + {"(*UnixListener).SetUnlinkOnClose", Method, 8, ""}, + {"(*UnixListener).SyscallConn", Method, 10, ""}, + {"(Flags).String", Method, 0, ""}, + {"(HardwareAddr).String", Method, 0, ""}, + {"(IP).AppendText", Method, 24, ""}, + {"(IP).DefaultMask", Method, 0, ""}, + {"(IP).Equal", Method, 0, ""}, + {"(IP).IsGlobalUnicast", Method, 0, ""}, + {"(IP).IsInterfaceLocalMulticast", Method, 0, ""}, + {"(IP).IsLinkLocalMulticast", Method, 0, ""}, + {"(IP).IsLinkLocalUnicast", Method, 0, ""}, + {"(IP).IsLoopback", Method, 0, ""}, + {"(IP).IsMulticast", Method, 0, ""}, + {"(IP).IsPrivate", Method, 17, ""}, + {"(IP).IsUnspecified", Method, 0, ""}, + {"(IP).MarshalText", Method, 2, ""}, + {"(IP).Mask", Method, 0, ""}, + {"(IP).String", Method, 0, ""}, + {"(IP).To16", Method, 0, ""}, + {"(IP).To4", Method, 0, ""}, + {"(IPMask).Size", Method, 0, ""}, + {"(IPMask).String", Method, 0, ""}, + {"(InvalidAddrError).Error", Method, 0, ""}, + {"(InvalidAddrError).Temporary", Method, 0, ""}, + {"(InvalidAddrError).Timeout", Method, 0, ""}, + {"(UnknownNetworkError).Error", Method, 0, ""}, + {"(UnknownNetworkError).Temporary", Method, 0, ""}, + {"(UnknownNetworkError).Timeout", Method, 0, ""}, + {"Addr", Type, 0, ""}, + {"AddrError", Type, 0, ""}, + {"AddrError.Addr", Field, 0, ""}, + {"AddrError.Err", Field, 0, ""}, + {"Buffers", Type, 8, ""}, + {"CIDRMask", Func, 0, "func(ones int, bits int) IPMask"}, + {"Conn", Type, 0, ""}, + {"DNSConfigError", Type, 0, ""}, + {"DNSConfigError.Err", Field, 0, ""}, + {"DNSError", Type, 0, ""}, + {"DNSError.Err", Field, 0, ""}, + {"DNSError.IsNotFound", Field, 13, ""}, + {"DNSError.IsTemporary", Field, 6, ""}, + {"DNSError.IsTimeout", Field, 0, ""}, + {"DNSError.Name", Field, 0, ""}, + {"DNSError.Server", Field, 0, ""}, + {"DNSError.UnwrapErr", Field, 23, ""}, + {"DefaultResolver", Var, 8, ""}, + {"Dial", Func, 0, "func(network string, address string) (Conn, error)"}, + {"DialIP", Func, 0, "func(network string, laddr *IPAddr, raddr *IPAddr) (*IPConn, error)"}, + {"DialTCP", Func, 0, "func(network string, laddr *TCPAddr, raddr *TCPAddr) (*TCPConn, error)"}, + {"DialTimeout", Func, 0, "func(network string, address string, timeout time.Duration) (Conn, error)"}, + {"DialUDP", Func, 0, "func(network string, laddr *UDPAddr, raddr *UDPAddr) (*UDPConn, error)"}, + {"DialUnix", Func, 0, "func(network string, laddr *UnixAddr, raddr *UnixAddr) (*UnixConn, error)"}, + {"Dialer", Type, 1, ""}, + {"Dialer.Cancel", Field, 6, ""}, + {"Dialer.Control", Field, 11, ""}, + {"Dialer.ControlContext", Field, 20, ""}, + {"Dialer.Deadline", Field, 1, ""}, + {"Dialer.DualStack", Field, 2, ""}, + {"Dialer.FallbackDelay", Field, 5, ""}, + {"Dialer.KeepAlive", Field, 3, ""}, + {"Dialer.KeepAliveConfig", Field, 23, ""}, + {"Dialer.LocalAddr", Field, 1, ""}, + {"Dialer.Resolver", Field, 8, ""}, + {"Dialer.Timeout", Field, 1, ""}, + {"ErrClosed", Var, 16, ""}, + {"ErrWriteToConnected", Var, 0, ""}, + {"Error", Type, 0, ""}, + {"FileConn", Func, 0, "func(f *os.File) (c Conn, err error)"}, + {"FileListener", Func, 0, "func(f *os.File) (ln Listener, err error)"}, + {"FilePacketConn", Func, 0, "func(f *os.File) (c PacketConn, err error)"}, + {"FlagBroadcast", Const, 0, ""}, + {"FlagLoopback", Const, 0, ""}, + {"FlagMulticast", 
Const, 0, ""}, + {"FlagPointToPoint", Const, 0, ""}, + {"FlagRunning", Const, 20, ""}, + {"FlagUp", Const, 0, ""}, + {"Flags", Type, 0, ""}, + {"HardwareAddr", Type, 0, ""}, + {"IP", Type, 0, ""}, + {"IPAddr", Type, 0, ""}, + {"IPAddr.IP", Field, 0, ""}, + {"IPAddr.Zone", Field, 1, ""}, + {"IPConn", Type, 0, ""}, + {"IPMask", Type, 0, ""}, + {"IPNet", Type, 0, ""}, + {"IPNet.IP", Field, 0, ""}, + {"IPNet.Mask", Field, 0, ""}, + {"IPv4", Func, 0, "func(a byte, b byte, c byte, d byte) IP"}, + {"IPv4Mask", Func, 0, "func(a byte, b byte, c byte, d byte) IPMask"}, + {"IPv4allrouter", Var, 0, ""}, + {"IPv4allsys", Var, 0, ""}, + {"IPv4bcast", Var, 0, ""}, + {"IPv4len", Const, 0, ""}, + {"IPv4zero", Var, 0, ""}, + {"IPv6interfacelocalallnodes", Var, 0, ""}, + {"IPv6len", Const, 0, ""}, + {"IPv6linklocalallnodes", Var, 0, ""}, + {"IPv6linklocalallrouters", Var, 0, ""}, + {"IPv6loopback", Var, 0, ""}, + {"IPv6unspecified", Var, 0, ""}, + {"IPv6zero", Var, 0, ""}, + {"Interface", Type, 0, ""}, + {"Interface.Flags", Field, 0, ""}, + {"Interface.HardwareAddr", Field, 0, ""}, + {"Interface.Index", Field, 0, ""}, + {"Interface.MTU", Field, 0, ""}, + {"Interface.Name", Field, 0, ""}, + {"InterfaceAddrs", Func, 0, "func() ([]Addr, error)"}, + {"InterfaceByIndex", Func, 0, "func(index int) (*Interface, error)"}, + {"InterfaceByName", Func, 0, "func(name string) (*Interface, error)"}, + {"Interfaces", Func, 0, "func() ([]Interface, error)"}, + {"InvalidAddrError", Type, 0, ""}, + {"JoinHostPort", Func, 0, "func(host string, port string) string"}, + {"KeepAliveConfig", Type, 23, ""}, + {"KeepAliveConfig.Count", Field, 23, ""}, + {"KeepAliveConfig.Enable", Field, 23, ""}, + {"KeepAliveConfig.Idle", Field, 23, ""}, + {"KeepAliveConfig.Interval", Field, 23, ""}, + {"Listen", Func, 0, "func(network string, address string) (Listener, error)"}, + {"ListenConfig", Type, 11, ""}, + {"ListenConfig.Control", Field, 11, ""}, + {"ListenConfig.KeepAlive", Field, 13, ""}, + {"ListenConfig.KeepAliveConfig", Field, 23, ""}, + {"ListenIP", Func, 0, "func(network string, laddr *IPAddr) (*IPConn, error)"}, + {"ListenMulticastUDP", Func, 0, "func(network string, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error)"}, + {"ListenPacket", Func, 0, "func(network string, address string) (PacketConn, error)"}, + {"ListenTCP", Func, 0, "func(network string, laddr *TCPAddr) (*TCPListener, error)"}, + {"ListenUDP", Func, 0, "func(network string, laddr *UDPAddr) (*UDPConn, error)"}, + {"ListenUnix", Func, 0, "func(network string, laddr *UnixAddr) (*UnixListener, error)"}, + {"ListenUnixgram", Func, 0, "func(network string, laddr *UnixAddr) (*UnixConn, error)"}, + {"Listener", Type, 0, ""}, + {"LookupAddr", Func, 0, "func(addr string) (names []string, err error)"}, + {"LookupCNAME", Func, 0, "func(host string) (cname string, err error)"}, + {"LookupHost", Func, 0, "func(host string) (addrs []string, err error)"}, + {"LookupIP", Func, 0, "func(host string) ([]IP, error)"}, + {"LookupMX", Func, 0, "func(name string) ([]*MX, error)"}, + {"LookupNS", Func, 1, "func(name string) ([]*NS, error)"}, + {"LookupPort", Func, 0, "func(network string, service string) (port int, err error)"}, + {"LookupSRV", Func, 0, "func(service string, proto string, name string) (cname string, addrs []*SRV, err error)"}, + {"LookupTXT", Func, 0, "func(name string) ([]string, error)"}, + {"MX", Type, 0, ""}, + {"MX.Host", Field, 0, ""}, + {"MX.Pref", Field, 0, ""}, + {"NS", Type, 1, ""}, + {"NS.Host", Field, 1, ""}, + {"OpError", Type, 0, ""}, + {"OpError.Addr", Field, 
0, ""}, + {"OpError.Err", Field, 0, ""}, + {"OpError.Net", Field, 0, ""}, + {"OpError.Op", Field, 0, ""}, + {"OpError.Source", Field, 5, ""}, + {"PacketConn", Type, 0, ""}, + {"ParseCIDR", Func, 0, "func(s string) (IP, *IPNet, error)"}, + {"ParseError", Type, 0, ""}, + {"ParseError.Text", Field, 0, ""}, + {"ParseError.Type", Field, 0, ""}, + {"ParseIP", Func, 0, "func(s string) IP"}, + {"ParseMAC", Func, 0, "func(s string) (hw HardwareAddr, err error)"}, + {"Pipe", Func, 0, "func() (Conn, Conn)"}, + {"ResolveIPAddr", Func, 0, "func(network string, address string) (*IPAddr, error)"}, + {"ResolveTCPAddr", Func, 0, "func(network string, address string) (*TCPAddr, error)"}, + {"ResolveUDPAddr", Func, 0, "func(network string, address string) (*UDPAddr, error)"}, + {"ResolveUnixAddr", Func, 0, "func(network string, address string) (*UnixAddr, error)"}, + {"Resolver", Type, 8, ""}, + {"Resolver.Dial", Field, 9, ""}, + {"Resolver.PreferGo", Field, 8, ""}, + {"Resolver.StrictErrors", Field, 9, ""}, + {"SRV", Type, 0, ""}, + {"SRV.Port", Field, 0, ""}, + {"SRV.Priority", Field, 0, ""}, + {"SRV.Target", Field, 0, ""}, + {"SRV.Weight", Field, 0, ""}, + {"SplitHostPort", Func, 0, "func(hostport string) (host string, port string, err error)"}, + {"TCPAddr", Type, 0, ""}, + {"TCPAddr.IP", Field, 0, ""}, + {"TCPAddr.Port", Field, 0, ""}, + {"TCPAddr.Zone", Field, 1, ""}, + {"TCPAddrFromAddrPort", Func, 18, "func(addr netip.AddrPort) *TCPAddr"}, + {"TCPConn", Type, 0, ""}, + {"TCPListener", Type, 0, ""}, + {"UDPAddr", Type, 0, ""}, + {"UDPAddr.IP", Field, 0, ""}, + {"UDPAddr.Port", Field, 0, ""}, + {"UDPAddr.Zone", Field, 1, ""}, + {"UDPAddrFromAddrPort", Func, 18, "func(addr netip.AddrPort) *UDPAddr"}, + {"UDPConn", Type, 0, ""}, + {"UnixAddr", Type, 0, ""}, + {"UnixAddr.Name", Field, 0, ""}, + {"UnixAddr.Net", Field, 0, ""}, + {"UnixConn", Type, 0, ""}, + {"UnixListener", Type, 0, ""}, + {"UnknownNetworkError", Type, 0, ""}, + }, + "net/http": { + {"(*Client).CloseIdleConnections", Method, 12, ""}, + {"(*Client).Do", Method, 0, ""}, + {"(*Client).Get", Method, 0, ""}, + {"(*Client).Head", Method, 0, ""}, + {"(*Client).Post", Method, 0, ""}, + {"(*Client).PostForm", Method, 0, ""}, + {"(*Cookie).String", Method, 0, ""}, + {"(*Cookie).Valid", Method, 18, ""}, + {"(*MaxBytesError).Error", Method, 19, ""}, + {"(*ProtocolError).Error", Method, 0, ""}, + {"(*ProtocolError).Is", Method, 21, ""}, + {"(*Protocols).SetHTTP1", Method, 24, ""}, + {"(*Protocols).SetHTTP2", Method, 24, ""}, + {"(*Protocols).SetUnencryptedHTTP2", Method, 24, ""}, + {"(*Request).AddCookie", Method, 0, ""}, + {"(*Request).BasicAuth", Method, 4, ""}, + {"(*Request).Clone", Method, 13, ""}, + {"(*Request).Context", Method, 7, ""}, + {"(*Request).Cookie", Method, 0, ""}, + {"(*Request).Cookies", Method, 0, ""}, + {"(*Request).CookiesNamed", Method, 23, ""}, + {"(*Request).FormFile", Method, 0, ""}, + {"(*Request).FormValue", Method, 0, ""}, + {"(*Request).MultipartReader", Method, 0, ""}, + {"(*Request).ParseForm", Method, 0, ""}, + {"(*Request).ParseMultipartForm", Method, 0, ""}, + {"(*Request).PathValue", Method, 22, ""}, + {"(*Request).PostFormValue", Method, 1, ""}, + {"(*Request).ProtoAtLeast", Method, 0, ""}, + {"(*Request).Referer", Method, 0, ""}, + {"(*Request).SetBasicAuth", Method, 0, ""}, + {"(*Request).SetPathValue", Method, 22, ""}, + {"(*Request).UserAgent", Method, 0, ""}, + {"(*Request).WithContext", Method, 7, ""}, + {"(*Request).Write", Method, 0, ""}, + {"(*Request).WriteProxy", Method, 0, ""}, + {"(*Response).Cookies", 
Method, 0, ""}, + {"(*Response).Location", Method, 0, ""}, + {"(*Response).ProtoAtLeast", Method, 0, ""}, + {"(*Response).Write", Method, 0, ""}, + {"(*ResponseController).EnableFullDuplex", Method, 21, ""}, + {"(*ResponseController).Flush", Method, 20, ""}, + {"(*ResponseController).Hijack", Method, 20, ""}, + {"(*ResponseController).SetReadDeadline", Method, 20, ""}, + {"(*ResponseController).SetWriteDeadline", Method, 20, ""}, + {"(*ServeMux).Handle", Method, 0, ""}, + {"(*ServeMux).HandleFunc", Method, 0, ""}, + {"(*ServeMux).Handler", Method, 1, ""}, + {"(*ServeMux).ServeHTTP", Method, 0, ""}, + {"(*Server).Close", Method, 8, ""}, + {"(*Server).ListenAndServe", Method, 0, ""}, + {"(*Server).ListenAndServeTLS", Method, 0, ""}, + {"(*Server).RegisterOnShutdown", Method, 9, ""}, + {"(*Server).Serve", Method, 0, ""}, + {"(*Server).ServeTLS", Method, 9, ""}, + {"(*Server).SetKeepAlivesEnabled", Method, 3, ""}, + {"(*Server).Shutdown", Method, 8, ""}, + {"(*Transport).CancelRequest", Method, 1, ""}, + {"(*Transport).Clone", Method, 13, ""}, + {"(*Transport).CloseIdleConnections", Method, 0, ""}, + {"(*Transport).RegisterProtocol", Method, 0, ""}, + {"(*Transport).RoundTrip", Method, 0, ""}, + {"(ConnState).String", Method, 3, ""}, + {"(Dir).Open", Method, 0, ""}, + {"(HandlerFunc).ServeHTTP", Method, 0, ""}, + {"(Header).Add", Method, 0, ""}, + {"(Header).Clone", Method, 13, ""}, + {"(Header).Del", Method, 0, ""}, + {"(Header).Get", Method, 0, ""}, + {"(Header).Set", Method, 0, ""}, + {"(Header).Values", Method, 14, ""}, + {"(Header).Write", Method, 0, ""}, + {"(Header).WriteSubset", Method, 0, ""}, + {"(Protocols).HTTP1", Method, 24, ""}, + {"(Protocols).HTTP2", Method, 24, ""}, + {"(Protocols).String", Method, 24, ""}, + {"(Protocols).UnencryptedHTTP2", Method, 24, ""}, + {"AllowQuerySemicolons", Func, 17, "func(h Handler) Handler"}, + {"CanonicalHeaderKey", Func, 0, "func(s string) string"}, + {"Client", Type, 0, ""}, + {"Client.CheckRedirect", Field, 0, ""}, + {"Client.Jar", Field, 0, ""}, + {"Client.Timeout", Field, 3, ""}, + {"Client.Transport", Field, 0, ""}, + {"CloseNotifier", Type, 1, ""}, + {"ConnState", Type, 3, ""}, + {"Cookie", Type, 0, ""}, + {"Cookie.Domain", Field, 0, ""}, + {"Cookie.Expires", Field, 0, ""}, + {"Cookie.HttpOnly", Field, 0, ""}, + {"Cookie.MaxAge", Field, 0, ""}, + {"Cookie.Name", Field, 0, ""}, + {"Cookie.Partitioned", Field, 23, ""}, + {"Cookie.Path", Field, 0, ""}, + {"Cookie.Quoted", Field, 23, ""}, + {"Cookie.Raw", Field, 0, ""}, + {"Cookie.RawExpires", Field, 0, ""}, + {"Cookie.SameSite", Field, 11, ""}, + {"Cookie.Secure", Field, 0, ""}, + {"Cookie.Unparsed", Field, 0, ""}, + {"Cookie.Value", Field, 0, ""}, + {"CookieJar", Type, 0, ""}, + {"DefaultClient", Var, 0, ""}, + {"DefaultMaxHeaderBytes", Const, 0, ""}, + {"DefaultMaxIdleConnsPerHost", Const, 0, ""}, + {"DefaultServeMux", Var, 0, ""}, + {"DefaultTransport", Var, 0, ""}, + {"DetectContentType", Func, 0, "func(data []byte) string"}, + {"Dir", Type, 0, ""}, + {"ErrAbortHandler", Var, 8, ""}, + {"ErrBodyNotAllowed", Var, 0, ""}, + {"ErrBodyReadAfterClose", Var, 0, ""}, + {"ErrContentLength", Var, 0, ""}, + {"ErrHandlerTimeout", Var, 0, ""}, + {"ErrHeaderTooLong", Var, 0, ""}, + {"ErrHijacked", Var, 0, ""}, + {"ErrLineTooLong", Var, 0, ""}, + {"ErrMissingBoundary", Var, 0, ""}, + {"ErrMissingContentLength", Var, 0, ""}, + {"ErrMissingFile", Var, 0, ""}, + {"ErrNoCookie", Var, 0, ""}, + {"ErrNoLocation", Var, 0, ""}, + {"ErrNotMultipart", Var, 0, ""}, + {"ErrNotSupported", Var, 0, ""}, + 
{"ErrSchemeMismatch", Var, 21, ""}, + {"ErrServerClosed", Var, 8, ""}, + {"ErrShortBody", Var, 0, ""}, + {"ErrSkipAltProtocol", Var, 6, ""}, + {"ErrUnexpectedTrailer", Var, 0, ""}, + {"ErrUseLastResponse", Var, 7, ""}, + {"ErrWriteAfterFlush", Var, 0, ""}, + {"Error", Func, 0, "func(w ResponseWriter, error string, code int)"}, + {"FS", Func, 16, "func(fsys fs.FS) FileSystem"}, + {"File", Type, 0, ""}, + {"FileServer", Func, 0, "func(root FileSystem) Handler"}, + {"FileServerFS", Func, 22, "func(root fs.FS) Handler"}, + {"FileSystem", Type, 0, ""}, + {"Flusher", Type, 0, ""}, + {"Get", Func, 0, "func(url string) (resp *Response, err error)"}, + {"HTTP2Config", Type, 24, ""}, + {"HTTP2Config.CountError", Field, 24, ""}, + {"HTTP2Config.MaxConcurrentStreams", Field, 24, ""}, + {"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24, ""}, + {"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24, ""}, + {"HTTP2Config.MaxReadFrameSize", Field, 24, ""}, + {"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24, ""}, + {"HTTP2Config.MaxReceiveBufferPerStream", Field, 24, ""}, + {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24, ""}, + {"HTTP2Config.PingTimeout", Field, 24, ""}, + {"HTTP2Config.SendPingTimeout", Field, 24, ""}, + {"HTTP2Config.WriteByteTimeout", Field, 24, ""}, + {"Handle", Func, 0, "func(pattern string, handler Handler)"}, + {"HandleFunc", Func, 0, "func(pattern string, handler func(ResponseWriter, *Request))"}, + {"Handler", Type, 0, ""}, + {"HandlerFunc", Type, 0, ""}, + {"Head", Func, 0, "func(url string) (resp *Response, err error)"}, + {"Header", Type, 0, ""}, + {"Hijacker", Type, 0, ""}, + {"ListenAndServe", Func, 0, "func(addr string, handler Handler) error"}, + {"ListenAndServeTLS", Func, 0, "func(addr string, certFile string, keyFile string, handler Handler) error"}, + {"LocalAddrContextKey", Var, 7, ""}, + {"MaxBytesError", Type, 19, ""}, + {"MaxBytesError.Limit", Field, 19, ""}, + {"MaxBytesHandler", Func, 18, "func(h Handler, n int64) Handler"}, + {"MaxBytesReader", Func, 0, "func(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser"}, + {"MethodConnect", Const, 6, ""}, + {"MethodDelete", Const, 6, ""}, + {"MethodGet", Const, 6, ""}, + {"MethodHead", Const, 6, ""}, + {"MethodOptions", Const, 6, ""}, + {"MethodPatch", Const, 6, ""}, + {"MethodPost", Const, 6, ""}, + {"MethodPut", Const, 6, ""}, + {"MethodTrace", Const, 6, ""}, + {"NewFileTransport", Func, 0, "func(fs FileSystem) RoundTripper"}, + {"NewFileTransportFS", Func, 22, "func(fsys fs.FS) RoundTripper"}, + {"NewRequest", Func, 0, "func(method string, url string, body io.Reader) (*Request, error)"}, + {"NewRequestWithContext", Func, 13, "func(ctx context.Context, method string, url string, body io.Reader) (*Request, error)"}, + {"NewResponseController", Func, 20, "func(rw ResponseWriter) *ResponseController"}, + {"NewServeMux", Func, 0, "func() *ServeMux"}, + {"NoBody", Var, 8, ""}, + {"NotFound", Func, 0, "func(w ResponseWriter, r *Request)"}, + {"NotFoundHandler", Func, 0, "func() Handler"}, + {"ParseCookie", Func, 23, "func(line string) ([]*Cookie, error)"}, + {"ParseHTTPVersion", Func, 0, "func(vers string) (major int, minor int, ok bool)"}, + {"ParseSetCookie", Func, 23, "func(line string) (*Cookie, error)"}, + {"ParseTime", Func, 1, "func(text string) (t time.Time, err error)"}, + {"Post", Func, 0, "func(url string, contentType string, body io.Reader) (resp *Response, err error)"}, + {"PostForm", Func, 0, "func(url string, data url.Values) (resp *Response, err error)"}, + {"ProtocolError", Type, 0, ""}, 
+ {"ProtocolError.ErrorString", Field, 0, ""}, + {"Protocols", Type, 24, ""}, + {"ProxyFromEnvironment", Func, 0, "func(req *Request) (*url.URL, error)"}, + {"ProxyURL", Func, 0, "func(fixedURL *url.URL) func(*Request) (*url.URL, error)"}, + {"PushOptions", Type, 8, ""}, + {"PushOptions.Header", Field, 8, ""}, + {"PushOptions.Method", Field, 8, ""}, + {"Pusher", Type, 8, ""}, + {"ReadRequest", Func, 0, "func(b *bufio.Reader) (*Request, error)"}, + {"ReadResponse", Func, 0, "func(r *bufio.Reader, req *Request) (*Response, error)"}, + {"Redirect", Func, 0, "func(w ResponseWriter, r *Request, url string, code int)"}, + {"RedirectHandler", Func, 0, "func(url string, code int) Handler"}, + {"Request", Type, 0, ""}, + {"Request.Body", Field, 0, ""}, + {"Request.Cancel", Field, 5, ""}, + {"Request.Close", Field, 0, ""}, + {"Request.ContentLength", Field, 0, ""}, + {"Request.Form", Field, 0, ""}, + {"Request.GetBody", Field, 8, ""}, + {"Request.Header", Field, 0, ""}, + {"Request.Host", Field, 0, ""}, + {"Request.Method", Field, 0, ""}, + {"Request.MultipartForm", Field, 0, ""}, + {"Request.Pattern", Field, 23, ""}, + {"Request.PostForm", Field, 1, ""}, + {"Request.Proto", Field, 0, ""}, + {"Request.ProtoMajor", Field, 0, ""}, + {"Request.ProtoMinor", Field, 0, ""}, + {"Request.RemoteAddr", Field, 0, ""}, + {"Request.RequestURI", Field, 0, ""}, + {"Request.Response", Field, 7, ""}, + {"Request.TLS", Field, 0, ""}, + {"Request.Trailer", Field, 0, ""}, + {"Request.TransferEncoding", Field, 0, ""}, + {"Request.URL", Field, 0, ""}, + {"Response", Type, 0, ""}, + {"Response.Body", Field, 0, ""}, + {"Response.Close", Field, 0, ""}, + {"Response.ContentLength", Field, 0, ""}, + {"Response.Header", Field, 0, ""}, + {"Response.Proto", Field, 0, ""}, + {"Response.ProtoMajor", Field, 0, ""}, + {"Response.ProtoMinor", Field, 0, ""}, + {"Response.Request", Field, 0, ""}, + {"Response.Status", Field, 0, ""}, + {"Response.StatusCode", Field, 0, ""}, + {"Response.TLS", Field, 3, ""}, + {"Response.Trailer", Field, 0, ""}, + {"Response.TransferEncoding", Field, 0, ""}, + {"Response.Uncompressed", Field, 7, ""}, + {"ResponseController", Type, 20, ""}, + {"ResponseWriter", Type, 0, ""}, + {"RoundTripper", Type, 0, ""}, + {"SameSite", Type, 11, ""}, + {"SameSiteDefaultMode", Const, 11, ""}, + {"SameSiteLaxMode", Const, 11, ""}, + {"SameSiteNoneMode", Const, 13, ""}, + {"SameSiteStrictMode", Const, 11, ""}, + {"Serve", Func, 0, "func(l net.Listener, handler Handler) error"}, + {"ServeContent", Func, 0, "func(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker)"}, + {"ServeFile", Func, 0, "func(w ResponseWriter, r *Request, name string)"}, + {"ServeFileFS", Func, 22, "func(w ResponseWriter, r *Request, fsys fs.FS, name string)"}, + {"ServeMux", Type, 0, ""}, + {"ServeTLS", Func, 9, "func(l net.Listener, handler Handler, certFile string, keyFile string) error"}, + {"Server", Type, 0, ""}, + {"Server.Addr", Field, 0, ""}, + {"Server.BaseContext", Field, 13, ""}, + {"Server.ConnContext", Field, 13, ""}, + {"Server.ConnState", Field, 3, ""}, + {"Server.DisableGeneralOptionsHandler", Field, 20, ""}, + {"Server.ErrorLog", Field, 3, ""}, + {"Server.HTTP2", Field, 24, ""}, + {"Server.Handler", Field, 0, ""}, + {"Server.IdleTimeout", Field, 8, ""}, + {"Server.MaxHeaderBytes", Field, 0, ""}, + {"Server.Protocols", Field, 24, ""}, + {"Server.ReadHeaderTimeout", Field, 8, ""}, + {"Server.ReadTimeout", Field, 0, ""}, + {"Server.TLSConfig", Field, 0, ""}, + {"Server.TLSNextProto", Field, 1, ""}, + 
{"Server.WriteTimeout", Field, 0, ""}, + {"ServerContextKey", Var, 7, ""}, + {"SetCookie", Func, 0, "func(w ResponseWriter, cookie *Cookie)"}, + {"StateActive", Const, 3, ""}, + {"StateClosed", Const, 3, ""}, + {"StateHijacked", Const, 3, ""}, + {"StateIdle", Const, 3, ""}, + {"StateNew", Const, 3, ""}, + {"StatusAccepted", Const, 0, ""}, + {"StatusAlreadyReported", Const, 7, ""}, + {"StatusBadGateway", Const, 0, ""}, + {"StatusBadRequest", Const, 0, ""}, + {"StatusConflict", Const, 0, ""}, + {"StatusContinue", Const, 0, ""}, + {"StatusCreated", Const, 0, ""}, + {"StatusEarlyHints", Const, 13, ""}, + {"StatusExpectationFailed", Const, 0, ""}, + {"StatusFailedDependency", Const, 7, ""}, + {"StatusForbidden", Const, 0, ""}, + {"StatusFound", Const, 0, ""}, + {"StatusGatewayTimeout", Const, 0, ""}, + {"StatusGone", Const, 0, ""}, + {"StatusHTTPVersionNotSupported", Const, 0, ""}, + {"StatusIMUsed", Const, 7, ""}, + {"StatusInsufficientStorage", Const, 7, ""}, + {"StatusInternalServerError", Const, 0, ""}, + {"StatusLengthRequired", Const, 0, ""}, + {"StatusLocked", Const, 7, ""}, + {"StatusLoopDetected", Const, 7, ""}, + {"StatusMethodNotAllowed", Const, 0, ""}, + {"StatusMisdirectedRequest", Const, 11, ""}, + {"StatusMovedPermanently", Const, 0, ""}, + {"StatusMultiStatus", Const, 7, ""}, + {"StatusMultipleChoices", Const, 0, ""}, + {"StatusNetworkAuthenticationRequired", Const, 6, ""}, + {"StatusNoContent", Const, 0, ""}, + {"StatusNonAuthoritativeInfo", Const, 0, ""}, + {"StatusNotAcceptable", Const, 0, ""}, + {"StatusNotExtended", Const, 7, ""}, + {"StatusNotFound", Const, 0, ""}, + {"StatusNotImplemented", Const, 0, ""}, + {"StatusNotModified", Const, 0, ""}, + {"StatusOK", Const, 0, ""}, + {"StatusPartialContent", Const, 0, ""}, + {"StatusPaymentRequired", Const, 0, ""}, + {"StatusPermanentRedirect", Const, 7, ""}, + {"StatusPreconditionFailed", Const, 0, ""}, + {"StatusPreconditionRequired", Const, 6, ""}, + {"StatusProcessing", Const, 7, ""}, + {"StatusProxyAuthRequired", Const, 0, ""}, + {"StatusRequestEntityTooLarge", Const, 0, ""}, + {"StatusRequestHeaderFieldsTooLarge", Const, 6, ""}, + {"StatusRequestTimeout", Const, 0, ""}, + {"StatusRequestURITooLong", Const, 0, ""}, + {"StatusRequestedRangeNotSatisfiable", Const, 0, ""}, + {"StatusResetContent", Const, 0, ""}, + {"StatusSeeOther", Const, 0, ""}, + {"StatusServiceUnavailable", Const, 0, ""}, + {"StatusSwitchingProtocols", Const, 0, ""}, + {"StatusTeapot", Const, 0, ""}, + {"StatusTemporaryRedirect", Const, 0, ""}, + {"StatusText", Func, 0, "func(code int) string"}, + {"StatusTooEarly", Const, 12, ""}, + {"StatusTooManyRequests", Const, 6, ""}, + {"StatusUnauthorized", Const, 0, ""}, + {"StatusUnavailableForLegalReasons", Const, 6, ""}, + {"StatusUnprocessableEntity", Const, 7, ""}, + {"StatusUnsupportedMediaType", Const, 0, ""}, + {"StatusUpgradeRequired", Const, 7, ""}, + {"StatusUseProxy", Const, 0, ""}, + {"StatusVariantAlsoNegotiates", Const, 7, ""}, + {"StripPrefix", Func, 0, "func(prefix string, h Handler) Handler"}, + {"TimeFormat", Const, 0, ""}, + {"TimeoutHandler", Func, 0, "func(h Handler, dt time.Duration, msg string) Handler"}, + {"TrailerPrefix", Const, 8, ""}, + {"Transport", Type, 0, ""}, + {"Transport.Dial", Field, 0, ""}, + {"Transport.DialContext", Field, 7, ""}, + {"Transport.DialTLS", Field, 4, ""}, + {"Transport.DialTLSContext", Field, 14, ""}, + {"Transport.DisableCompression", Field, 0, ""}, + {"Transport.DisableKeepAlives", Field, 0, ""}, + {"Transport.ExpectContinueTimeout", Field, 6, ""}, + 
{"Transport.ForceAttemptHTTP2", Field, 13, ""}, + {"Transport.GetProxyConnectHeader", Field, 16, ""}, + {"Transport.HTTP2", Field, 24, ""}, + {"Transport.IdleConnTimeout", Field, 7, ""}, + {"Transport.MaxConnsPerHost", Field, 11, ""}, + {"Transport.MaxIdleConns", Field, 7, ""}, + {"Transport.MaxIdleConnsPerHost", Field, 0, ""}, + {"Transport.MaxResponseHeaderBytes", Field, 7, ""}, + {"Transport.OnProxyConnectResponse", Field, 20, ""}, + {"Transport.Protocols", Field, 24, ""}, + {"Transport.Proxy", Field, 0, ""}, + {"Transport.ProxyConnectHeader", Field, 8, ""}, + {"Transport.ReadBufferSize", Field, 13, ""}, + {"Transport.ResponseHeaderTimeout", Field, 1, ""}, + {"Transport.TLSClientConfig", Field, 0, ""}, + {"Transport.TLSHandshakeTimeout", Field, 3, ""}, + {"Transport.TLSNextProto", Field, 6, ""}, + {"Transport.WriteBufferSize", Field, 13, ""}, + }, + "net/http/cgi": { + {"(*Handler).ServeHTTP", Method, 0, ""}, + {"Handler", Type, 0, ""}, + {"Handler.Args", Field, 0, ""}, + {"Handler.Dir", Field, 0, ""}, + {"Handler.Env", Field, 0, ""}, + {"Handler.InheritEnv", Field, 0, ""}, + {"Handler.Logger", Field, 0, ""}, + {"Handler.Path", Field, 0, ""}, + {"Handler.PathLocationHandler", Field, 0, ""}, + {"Handler.Root", Field, 0, ""}, + {"Handler.Stderr", Field, 7, ""}, + {"Request", Func, 0, "func() (*http.Request, error)"}, + {"RequestFromMap", Func, 0, "func(params map[string]string) (*http.Request, error)"}, + {"Serve", Func, 0, "func(handler http.Handler) error"}, + }, + "net/http/cookiejar": { + {"(*Jar).Cookies", Method, 1, ""}, + {"(*Jar).SetCookies", Method, 1, ""}, + {"Jar", Type, 1, ""}, + {"New", Func, 1, "func(o *Options) (*Jar, error)"}, + {"Options", Type, 1, ""}, + {"Options.PublicSuffixList", Field, 1, ""}, + {"PublicSuffixList", Type, 1, ""}, + }, + "net/http/fcgi": { + {"ErrConnClosed", Var, 5, ""}, + {"ErrRequestAborted", Var, 5, ""}, + {"ProcessEnv", Func, 9, "func(r *http.Request) map[string]string"}, + {"Serve", Func, 0, "func(l net.Listener, handler http.Handler) error"}, + }, + "net/http/httptest": { + {"(*ResponseRecorder).Flush", Method, 0, ""}, + {"(*ResponseRecorder).Header", Method, 0, ""}, + {"(*ResponseRecorder).Result", Method, 7, ""}, + {"(*ResponseRecorder).Write", Method, 0, ""}, + {"(*ResponseRecorder).WriteHeader", Method, 0, ""}, + {"(*ResponseRecorder).WriteString", Method, 6, ""}, + {"(*Server).Certificate", Method, 9, ""}, + {"(*Server).Client", Method, 9, ""}, + {"(*Server).Close", Method, 0, ""}, + {"(*Server).CloseClientConnections", Method, 0, ""}, + {"(*Server).Start", Method, 0, ""}, + {"(*Server).StartTLS", Method, 0, ""}, + {"DefaultRemoteAddr", Const, 0, ""}, + {"NewRecorder", Func, 0, "func() *ResponseRecorder"}, + {"NewRequest", Func, 7, "func(method string, target string, body io.Reader) *http.Request"}, + {"NewRequestWithContext", Func, 23, "func(ctx context.Context, method string, target string, body io.Reader) *http.Request"}, + {"NewServer", Func, 0, "func(handler http.Handler) *Server"}, + {"NewTLSServer", Func, 0, "func(handler http.Handler) *Server"}, + {"NewUnstartedServer", Func, 0, "func(handler http.Handler) *Server"}, + {"ResponseRecorder", Type, 0, ""}, + {"ResponseRecorder.Body", Field, 0, ""}, + {"ResponseRecorder.Code", Field, 0, ""}, + {"ResponseRecorder.Flushed", Field, 0, ""}, + {"ResponseRecorder.HeaderMap", Field, 0, ""}, + {"Server", Type, 0, ""}, + {"Server.Config", Field, 0, ""}, + {"Server.EnableHTTP2", Field, 14, ""}, + {"Server.Listener", Field, 0, ""}, + {"Server.TLS", Field, 0, ""}, + {"Server.URL", Field, 0, ""}, + 
}, + "net/http/httptrace": { + {"ClientTrace", Type, 7, ""}, + {"ClientTrace.ConnectDone", Field, 7, ""}, + {"ClientTrace.ConnectStart", Field, 7, ""}, + {"ClientTrace.DNSDone", Field, 7, ""}, + {"ClientTrace.DNSStart", Field, 7, ""}, + {"ClientTrace.GetConn", Field, 7, ""}, + {"ClientTrace.Got100Continue", Field, 7, ""}, + {"ClientTrace.Got1xxResponse", Field, 11, ""}, + {"ClientTrace.GotConn", Field, 7, ""}, + {"ClientTrace.GotFirstResponseByte", Field, 7, ""}, + {"ClientTrace.PutIdleConn", Field, 7, ""}, + {"ClientTrace.TLSHandshakeDone", Field, 8, ""}, + {"ClientTrace.TLSHandshakeStart", Field, 8, ""}, + {"ClientTrace.Wait100Continue", Field, 7, ""}, + {"ClientTrace.WroteHeaderField", Field, 11, ""}, + {"ClientTrace.WroteHeaders", Field, 7, ""}, + {"ClientTrace.WroteRequest", Field, 7, ""}, + {"ContextClientTrace", Func, 7, "func(ctx context.Context) *ClientTrace"}, + {"DNSDoneInfo", Type, 7, ""}, + {"DNSDoneInfo.Addrs", Field, 7, ""}, + {"DNSDoneInfo.Coalesced", Field, 7, ""}, + {"DNSDoneInfo.Err", Field, 7, ""}, + {"DNSStartInfo", Type, 7, ""}, + {"DNSStartInfo.Host", Field, 7, ""}, + {"GotConnInfo", Type, 7, ""}, + {"GotConnInfo.Conn", Field, 7, ""}, + {"GotConnInfo.IdleTime", Field, 7, ""}, + {"GotConnInfo.Reused", Field, 7, ""}, + {"GotConnInfo.WasIdle", Field, 7, ""}, + {"WithClientTrace", Func, 7, "func(ctx context.Context, trace *ClientTrace) context.Context"}, + {"WroteRequestInfo", Type, 7, ""}, + {"WroteRequestInfo.Err", Field, 7, ""}, + }, + "net/http/httputil": { + {"(*ClientConn).Close", Method, 0, ""}, + {"(*ClientConn).Do", Method, 0, ""}, + {"(*ClientConn).Hijack", Method, 0, ""}, + {"(*ClientConn).Pending", Method, 0, ""}, + {"(*ClientConn).Read", Method, 0, ""}, + {"(*ClientConn).Write", Method, 0, ""}, + {"(*ProxyRequest).SetURL", Method, 20, ""}, + {"(*ProxyRequest).SetXForwarded", Method, 20, ""}, + {"(*ReverseProxy).ServeHTTP", Method, 0, ""}, + {"(*ServerConn).Close", Method, 0, ""}, + {"(*ServerConn).Hijack", Method, 0, ""}, + {"(*ServerConn).Pending", Method, 0, ""}, + {"(*ServerConn).Read", Method, 0, ""}, + {"(*ServerConn).Write", Method, 0, ""}, + {"BufferPool", Type, 6, ""}, + {"ClientConn", Type, 0, ""}, + {"DumpRequest", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"}, + {"DumpRequestOut", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"}, + {"DumpResponse", Func, 0, "func(resp *http.Response, body bool) ([]byte, error)"}, + {"ErrClosed", Var, 0, ""}, + {"ErrLineTooLong", Var, 0, ""}, + {"ErrPersistEOF", Var, 0, ""}, + {"ErrPipeline", Var, 0, ""}, + {"NewChunkedReader", Func, 0, "func(r io.Reader) io.Reader"}, + {"NewChunkedWriter", Func, 0, "func(w io.Writer) io.WriteCloser"}, + {"NewClientConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ClientConn"}, + {"NewProxyClientConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ClientConn"}, + {"NewServerConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ServerConn"}, + {"NewSingleHostReverseProxy", Func, 0, "func(target *url.URL) *ReverseProxy"}, + {"ProxyRequest", Type, 20, ""}, + {"ProxyRequest.In", Field, 20, ""}, + {"ProxyRequest.Out", Field, 20, ""}, + {"ReverseProxy", Type, 0, ""}, + {"ReverseProxy.BufferPool", Field, 6, ""}, + {"ReverseProxy.Director", Field, 0, ""}, + {"ReverseProxy.ErrorHandler", Field, 11, ""}, + {"ReverseProxy.ErrorLog", Field, 4, ""}, + {"ReverseProxy.FlushInterval", Field, 0, ""}, + {"ReverseProxy.ModifyResponse", Field, 8, ""}, + {"ReverseProxy.Rewrite", Field, 20, ""}, + {"ReverseProxy.Transport", Field, 0, ""}, + {"ServerConn", Type, 0, ""}, + }, 
+ "net/http/pprof": { + {"Cmdline", Func, 0, "func(w http.ResponseWriter, r *http.Request)"}, + {"Handler", Func, 0, "func(name string) http.Handler"}, + {"Index", Func, 0, "func(w http.ResponseWriter, r *http.Request)"}, + {"Profile", Func, 0, "func(w http.ResponseWriter, r *http.Request)"}, + {"Symbol", Func, 0, "func(w http.ResponseWriter, r *http.Request)"}, + {"Trace", Func, 5, "func(w http.ResponseWriter, r *http.Request)"}, + }, + "net/mail": { + {"(*Address).String", Method, 0, ""}, + {"(*AddressParser).Parse", Method, 5, ""}, + {"(*AddressParser).ParseList", Method, 5, ""}, + {"(Header).AddressList", Method, 0, ""}, + {"(Header).Date", Method, 0, ""}, + {"(Header).Get", Method, 0, ""}, + {"Address", Type, 0, ""}, + {"Address.Address", Field, 0, ""}, + {"Address.Name", Field, 0, ""}, + {"AddressParser", Type, 5, ""}, + {"AddressParser.WordDecoder", Field, 5, ""}, + {"ErrHeaderNotPresent", Var, 0, ""}, + {"Header", Type, 0, ""}, + {"Message", Type, 0, ""}, + {"Message.Body", Field, 0, ""}, + {"Message.Header", Field, 0, ""}, + {"ParseAddress", Func, 1, "func(address string) (*Address, error)"}, + {"ParseAddressList", Func, 1, "func(list string) ([]*Address, error)"}, + {"ParseDate", Func, 8, "func(date string) (time.Time, error)"}, + {"ReadMessage", Func, 0, "func(r io.Reader) (msg *Message, err error)"}, + }, + "net/netip": { + {"(*Addr).UnmarshalBinary", Method, 18, ""}, + {"(*Addr).UnmarshalText", Method, 18, ""}, + {"(*AddrPort).UnmarshalBinary", Method, 18, ""}, + {"(*AddrPort).UnmarshalText", Method, 18, ""}, + {"(*Prefix).UnmarshalBinary", Method, 18, ""}, + {"(*Prefix).UnmarshalText", Method, 18, ""}, + {"(Addr).AppendBinary", Method, 24, ""}, + {"(Addr).AppendText", Method, 24, ""}, + {"(Addr).AppendTo", Method, 18, ""}, + {"(Addr).As16", Method, 18, ""}, + {"(Addr).As4", Method, 18, ""}, + {"(Addr).AsSlice", Method, 18, ""}, + {"(Addr).BitLen", Method, 18, ""}, + {"(Addr).Compare", Method, 18, ""}, + {"(Addr).Is4", Method, 18, ""}, + {"(Addr).Is4In6", Method, 18, ""}, + {"(Addr).Is6", Method, 18, ""}, + {"(Addr).IsGlobalUnicast", Method, 18, ""}, + {"(Addr).IsInterfaceLocalMulticast", Method, 18, ""}, + {"(Addr).IsLinkLocalMulticast", Method, 18, ""}, + {"(Addr).IsLinkLocalUnicast", Method, 18, ""}, + {"(Addr).IsLoopback", Method, 18, ""}, + {"(Addr).IsMulticast", Method, 18, ""}, + {"(Addr).IsPrivate", Method, 18, ""}, + {"(Addr).IsUnspecified", Method, 18, ""}, + {"(Addr).IsValid", Method, 18, ""}, + {"(Addr).Less", Method, 18, ""}, + {"(Addr).MarshalBinary", Method, 18, ""}, + {"(Addr).MarshalText", Method, 18, ""}, + {"(Addr).Next", Method, 18, ""}, + {"(Addr).Prefix", Method, 18, ""}, + {"(Addr).Prev", Method, 18, ""}, + {"(Addr).String", Method, 18, ""}, + {"(Addr).StringExpanded", Method, 18, ""}, + {"(Addr).Unmap", Method, 18, ""}, + {"(Addr).WithZone", Method, 18, ""}, + {"(Addr).Zone", Method, 18, ""}, + {"(AddrPort).Addr", Method, 18, ""}, + {"(AddrPort).AppendBinary", Method, 24, ""}, + {"(AddrPort).AppendText", Method, 24, ""}, + {"(AddrPort).AppendTo", Method, 18, ""}, + {"(AddrPort).Compare", Method, 22, ""}, + {"(AddrPort).IsValid", Method, 18, ""}, + {"(AddrPort).MarshalBinary", Method, 18, ""}, + {"(AddrPort).MarshalText", Method, 18, ""}, + {"(AddrPort).Port", Method, 18, ""}, + {"(AddrPort).String", Method, 18, ""}, + {"(Prefix).Addr", Method, 18, ""}, + {"(Prefix).AppendBinary", Method, 24, ""}, + {"(Prefix).AppendText", Method, 24, ""}, + {"(Prefix).AppendTo", Method, 18, ""}, + {"(Prefix).Bits", Method, 18, ""}, + {"(Prefix).Contains", Method, 18, 
""}, + {"(Prefix).IsSingleIP", Method, 18, ""}, + {"(Prefix).IsValid", Method, 18, ""}, + {"(Prefix).MarshalBinary", Method, 18, ""}, + {"(Prefix).MarshalText", Method, 18, ""}, + {"(Prefix).Masked", Method, 18, ""}, + {"(Prefix).Overlaps", Method, 18, ""}, + {"(Prefix).String", Method, 18, ""}, + {"Addr", Type, 18, ""}, + {"AddrFrom16", Func, 18, "func(addr [16]byte) Addr"}, + {"AddrFrom4", Func, 18, "func(addr [4]byte) Addr"}, + {"AddrFromSlice", Func, 18, "func(slice []byte) (ip Addr, ok bool)"}, + {"AddrPort", Type, 18, ""}, + {"AddrPortFrom", Func, 18, "func(ip Addr, port uint16) AddrPort"}, + {"IPv4Unspecified", Func, 18, "func() Addr"}, + {"IPv6LinkLocalAllNodes", Func, 18, "func() Addr"}, + {"IPv6LinkLocalAllRouters", Func, 20, "func() Addr"}, + {"IPv6Loopback", Func, 20, "func() Addr"}, + {"IPv6Unspecified", Func, 18, "func() Addr"}, + {"MustParseAddr", Func, 18, "func(s string) Addr"}, + {"MustParseAddrPort", Func, 18, "func(s string) AddrPort"}, + {"MustParsePrefix", Func, 18, "func(s string) Prefix"}, + {"ParseAddr", Func, 18, "func(s string) (Addr, error)"}, + {"ParseAddrPort", Func, 18, "func(s string) (AddrPort, error)"}, + {"ParsePrefix", Func, 18, "func(s string) (Prefix, error)"}, + {"Prefix", Type, 18, ""}, + {"PrefixFrom", Func, 18, "func(ip Addr, bits int) Prefix"}, + }, + "net/rpc": { + {"(*Client).Call", Method, 0, ""}, + {"(*Client).Close", Method, 0, ""}, + {"(*Client).Go", Method, 0, ""}, + {"(*Server).Accept", Method, 0, ""}, + {"(*Server).HandleHTTP", Method, 0, ""}, + {"(*Server).Register", Method, 0, ""}, + {"(*Server).RegisterName", Method, 0, ""}, + {"(*Server).ServeCodec", Method, 0, ""}, + {"(*Server).ServeConn", Method, 0, ""}, + {"(*Server).ServeHTTP", Method, 0, ""}, + {"(*Server).ServeRequest", Method, 0, ""}, + {"(ServerError).Error", Method, 0, ""}, + {"Accept", Func, 0, "func(lis net.Listener)"}, + {"Call", Type, 0, ""}, + {"Call.Args", Field, 0, ""}, + {"Call.Done", Field, 0, ""}, + {"Call.Error", Field, 0, ""}, + {"Call.Reply", Field, 0, ""}, + {"Call.ServiceMethod", Field, 0, ""}, + {"Client", Type, 0, ""}, + {"ClientCodec", Type, 0, ""}, + {"DefaultDebugPath", Const, 0, ""}, + {"DefaultRPCPath", Const, 0, ""}, + {"DefaultServer", Var, 0, ""}, + {"Dial", Func, 0, "func(network string, address string) (*Client, error)"}, + {"DialHTTP", Func, 0, "func(network string, address string) (*Client, error)"}, + {"DialHTTPPath", Func, 0, "func(network string, address string, path string) (*Client, error)"}, + {"ErrShutdown", Var, 0, ""}, + {"HandleHTTP", Func, 0, "func()"}, + {"NewClient", Func, 0, "func(conn io.ReadWriteCloser) *Client"}, + {"NewClientWithCodec", Func, 0, "func(codec ClientCodec) *Client"}, + {"NewServer", Func, 0, "func() *Server"}, + {"Register", Func, 0, "func(rcvr any) error"}, + {"RegisterName", Func, 0, "func(name string, rcvr any) error"}, + {"Request", Type, 0, ""}, + {"Request.Seq", Field, 0, ""}, + {"Request.ServiceMethod", Field, 0, ""}, + {"Response", Type, 0, ""}, + {"Response.Error", Field, 0, ""}, + {"Response.Seq", Field, 0, ""}, + {"Response.ServiceMethod", Field, 0, ""}, + {"ServeCodec", Func, 0, "func(codec ServerCodec)"}, + {"ServeConn", Func, 0, "func(conn io.ReadWriteCloser)"}, + {"ServeRequest", Func, 0, "func(codec ServerCodec) error"}, + {"Server", Type, 0, ""}, + {"ServerCodec", Type, 0, ""}, + {"ServerError", Type, 0, ""}, + }, + "net/rpc/jsonrpc": { + {"Dial", Func, 0, "func(network string, address string) (*rpc.Client, error)"}, + {"NewClient", Func, 0, "func(conn io.ReadWriteCloser) *rpc.Client"}, + 
{"NewClientCodec", Func, 0, "func(conn io.ReadWriteCloser) rpc.ClientCodec"}, + {"NewServerCodec", Func, 0, "func(conn io.ReadWriteCloser) rpc.ServerCodec"}, + {"ServeConn", Func, 0, "func(conn io.ReadWriteCloser)"}, + }, + "net/smtp": { + {"(*Client).Auth", Method, 0, ""}, + {"(*Client).Close", Method, 2, ""}, + {"(*Client).Data", Method, 0, ""}, + {"(*Client).Extension", Method, 0, ""}, + {"(*Client).Hello", Method, 1, ""}, + {"(*Client).Mail", Method, 0, ""}, + {"(*Client).Noop", Method, 10, ""}, + {"(*Client).Quit", Method, 0, ""}, + {"(*Client).Rcpt", Method, 0, ""}, + {"(*Client).Reset", Method, 0, ""}, + {"(*Client).StartTLS", Method, 0, ""}, + {"(*Client).TLSConnectionState", Method, 5, ""}, + {"(*Client).Verify", Method, 0, ""}, + {"Auth", Type, 0, ""}, + {"CRAMMD5Auth", Func, 0, "func(username string, secret string) Auth"}, + {"Client", Type, 0, ""}, + {"Client.Text", Field, 0, ""}, + {"Dial", Func, 0, "func(addr string) (*Client, error)"}, + {"NewClient", Func, 0, "func(conn net.Conn, host string) (*Client, error)"}, + {"PlainAuth", Func, 0, "func(identity string, username string, password string, host string) Auth"}, + {"SendMail", Func, 0, "func(addr string, a Auth, from string, to []string, msg []byte) error"}, + {"ServerInfo", Type, 0, ""}, + {"ServerInfo.Auth", Field, 0, ""}, + {"ServerInfo.Name", Field, 0, ""}, + {"ServerInfo.TLS", Field, 0, ""}, + }, + "net/textproto": { + {"(*Conn).Close", Method, 0, ""}, + {"(*Conn).Cmd", Method, 0, ""}, + {"(*Conn).DotReader", Method, 0, ""}, + {"(*Conn).DotWriter", Method, 0, ""}, + {"(*Conn).EndRequest", Method, 0, ""}, + {"(*Conn).EndResponse", Method, 0, ""}, + {"(*Conn).Next", Method, 0, ""}, + {"(*Conn).PrintfLine", Method, 0, ""}, + {"(*Conn).ReadCodeLine", Method, 0, ""}, + {"(*Conn).ReadContinuedLine", Method, 0, ""}, + {"(*Conn).ReadContinuedLineBytes", Method, 0, ""}, + {"(*Conn).ReadDotBytes", Method, 0, ""}, + {"(*Conn).ReadDotLines", Method, 0, ""}, + {"(*Conn).ReadLine", Method, 0, ""}, + {"(*Conn).ReadLineBytes", Method, 0, ""}, + {"(*Conn).ReadMIMEHeader", Method, 0, ""}, + {"(*Conn).ReadResponse", Method, 0, ""}, + {"(*Conn).StartRequest", Method, 0, ""}, + {"(*Conn).StartResponse", Method, 0, ""}, + {"(*Error).Error", Method, 0, ""}, + {"(*Pipeline).EndRequest", Method, 0, ""}, + {"(*Pipeline).EndResponse", Method, 0, ""}, + {"(*Pipeline).Next", Method, 0, ""}, + {"(*Pipeline).StartRequest", Method, 0, ""}, + {"(*Pipeline).StartResponse", Method, 0, ""}, + {"(*Reader).DotReader", Method, 0, ""}, + {"(*Reader).ReadCodeLine", Method, 0, ""}, + {"(*Reader).ReadContinuedLine", Method, 0, ""}, + {"(*Reader).ReadContinuedLineBytes", Method, 0, ""}, + {"(*Reader).ReadDotBytes", Method, 0, ""}, + {"(*Reader).ReadDotLines", Method, 0, ""}, + {"(*Reader).ReadLine", Method, 0, ""}, + {"(*Reader).ReadLineBytes", Method, 0, ""}, + {"(*Reader).ReadMIMEHeader", Method, 0, ""}, + {"(*Reader).ReadResponse", Method, 0, ""}, + {"(*Writer).DotWriter", Method, 0, ""}, + {"(*Writer).PrintfLine", Method, 0, ""}, + {"(MIMEHeader).Add", Method, 0, ""}, + {"(MIMEHeader).Del", Method, 0, ""}, + {"(MIMEHeader).Get", Method, 0, ""}, + {"(MIMEHeader).Set", Method, 0, ""}, + {"(MIMEHeader).Values", Method, 14, ""}, + {"(ProtocolError).Error", Method, 0, ""}, + {"CanonicalMIMEHeaderKey", Func, 0, "func(s string) string"}, + {"Conn", Type, 0, ""}, + {"Conn.Pipeline", Field, 0, ""}, + {"Conn.Reader", Field, 0, ""}, + {"Conn.Writer", Field, 0, ""}, + {"Dial", Func, 0, "func(network string, addr string) (*Conn, error)"}, + {"Error", Type, 0, ""}, + 
{"Error.Code", Field, 0, ""}, + {"Error.Msg", Field, 0, ""}, + {"MIMEHeader", Type, 0, ""}, + {"NewConn", Func, 0, "func(conn io.ReadWriteCloser) *Conn"}, + {"NewReader", Func, 0, "func(r *bufio.Reader) *Reader"}, + {"NewWriter", Func, 0, "func(w *bufio.Writer) *Writer"}, + {"Pipeline", Type, 0, ""}, + {"ProtocolError", Type, 0, ""}, + {"Reader", Type, 0, ""}, + {"Reader.R", Field, 0, ""}, + {"TrimBytes", Func, 1, "func(b []byte) []byte"}, + {"TrimString", Func, 1, "func(s string) string"}, + {"Writer", Type, 0, ""}, + {"Writer.W", Field, 0, ""}, + }, + "net/url": { + {"(*Error).Error", Method, 0, ""}, + {"(*Error).Temporary", Method, 6, ""}, + {"(*Error).Timeout", Method, 6, ""}, + {"(*Error).Unwrap", Method, 13, ""}, + {"(*URL).AppendBinary", Method, 24, ""}, + {"(*URL).EscapedFragment", Method, 15, ""}, + {"(*URL).EscapedPath", Method, 5, ""}, + {"(*URL).Hostname", Method, 8, ""}, + {"(*URL).IsAbs", Method, 0, ""}, + {"(*URL).JoinPath", Method, 19, ""}, + {"(*URL).MarshalBinary", Method, 8, ""}, + {"(*URL).Parse", Method, 0, ""}, + {"(*URL).Port", Method, 8, ""}, + {"(*URL).Query", Method, 0, ""}, + {"(*URL).Redacted", Method, 15, ""}, + {"(*URL).RequestURI", Method, 0, ""}, + {"(*URL).ResolveReference", Method, 0, ""}, + {"(*URL).String", Method, 0, ""}, + {"(*URL).UnmarshalBinary", Method, 8, ""}, + {"(*Userinfo).Password", Method, 0, ""}, + {"(*Userinfo).String", Method, 0, ""}, + {"(*Userinfo).Username", Method, 0, ""}, + {"(EscapeError).Error", Method, 0, ""}, + {"(InvalidHostError).Error", Method, 6, ""}, + {"(Values).Add", Method, 0, ""}, + {"(Values).Del", Method, 0, ""}, + {"(Values).Encode", Method, 0, ""}, + {"(Values).Get", Method, 0, ""}, + {"(Values).Has", Method, 17, ""}, + {"(Values).Set", Method, 0, ""}, + {"Error", Type, 0, ""}, + {"Error.Err", Field, 0, ""}, + {"Error.Op", Field, 0, ""}, + {"Error.URL", Field, 0, ""}, + {"EscapeError", Type, 0, ""}, + {"InvalidHostError", Type, 6, ""}, + {"JoinPath", Func, 19, "func(base string, elem ...string) (result string, err error)"}, + {"Parse", Func, 0, "func(rawURL string) (*URL, error)"}, + {"ParseQuery", Func, 0, "func(query string) (Values, error)"}, + {"ParseRequestURI", Func, 0, "func(rawURL string) (*URL, error)"}, + {"PathEscape", Func, 8, "func(s string) string"}, + {"PathUnescape", Func, 8, "func(s string) (string, error)"}, + {"QueryEscape", Func, 0, "func(s string) string"}, + {"QueryUnescape", Func, 0, "func(s string) (string, error)"}, + {"URL", Type, 0, ""}, + {"URL.ForceQuery", Field, 7, ""}, + {"URL.Fragment", Field, 0, ""}, + {"URL.Host", Field, 0, ""}, + {"URL.OmitHost", Field, 19, ""}, + {"URL.Opaque", Field, 0, ""}, + {"URL.Path", Field, 0, ""}, + {"URL.RawFragment", Field, 15, ""}, + {"URL.RawPath", Field, 5, ""}, + {"URL.RawQuery", Field, 0, ""}, + {"URL.Scheme", Field, 0, ""}, + {"URL.User", Field, 0, ""}, + {"User", Func, 0, "func(username string) *Userinfo"}, + {"UserPassword", Func, 0, "func(username string, password string) *Userinfo"}, + {"Userinfo", Type, 0, ""}, + {"Values", Type, 0, ""}, + }, + "os": { + {"(*File).Chdir", Method, 0, ""}, + {"(*File).Chmod", Method, 0, ""}, + {"(*File).Chown", Method, 0, ""}, + {"(*File).Close", Method, 0, ""}, + {"(*File).Fd", Method, 0, ""}, + {"(*File).Name", Method, 0, ""}, + {"(*File).Read", Method, 0, ""}, + {"(*File).ReadAt", Method, 0, ""}, + {"(*File).ReadDir", Method, 16, ""}, + {"(*File).ReadFrom", Method, 15, ""}, + {"(*File).Readdir", Method, 0, ""}, + {"(*File).Readdirnames", Method, 0, ""}, + {"(*File).Seek", Method, 0, ""}, + 
{"(*File).SetDeadline", Method, 10, ""}, + {"(*File).SetReadDeadline", Method, 10, ""}, + {"(*File).SetWriteDeadline", Method, 10, ""}, + {"(*File).Stat", Method, 0, ""}, + {"(*File).Sync", Method, 0, ""}, + {"(*File).SyscallConn", Method, 12, ""}, + {"(*File).Truncate", Method, 0, ""}, + {"(*File).Write", Method, 0, ""}, + {"(*File).WriteAt", Method, 0, ""}, + {"(*File).WriteString", Method, 0, ""}, + {"(*File).WriteTo", Method, 22, ""}, + {"(*LinkError).Error", Method, 0, ""}, + {"(*LinkError).Unwrap", Method, 13, ""}, + {"(*PathError).Error", Method, 0, ""}, + {"(*PathError).Timeout", Method, 10, ""}, + {"(*PathError).Unwrap", Method, 13, ""}, + {"(*Process).Kill", Method, 0, ""}, + {"(*Process).Release", Method, 0, ""}, + {"(*Process).Signal", Method, 0, ""}, + {"(*Process).Wait", Method, 0, ""}, + {"(*ProcessState).ExitCode", Method, 12, ""}, + {"(*ProcessState).Exited", Method, 0, ""}, + {"(*ProcessState).Pid", Method, 0, ""}, + {"(*ProcessState).String", Method, 0, ""}, + {"(*ProcessState).Success", Method, 0, ""}, + {"(*ProcessState).Sys", Method, 0, ""}, + {"(*ProcessState).SysUsage", Method, 0, ""}, + {"(*ProcessState).SystemTime", Method, 0, ""}, + {"(*ProcessState).UserTime", Method, 0, ""}, + {"(*Root).Chmod", Method, 25, ""}, + {"(*Root).Chown", Method, 25, ""}, + {"(*Root).Chtimes", Method, 25, ""}, + {"(*Root).Close", Method, 24, ""}, + {"(*Root).Create", Method, 24, ""}, + {"(*Root).FS", Method, 24, ""}, + {"(*Root).Lchown", Method, 25, ""}, + {"(*Root).Link", Method, 25, ""}, + {"(*Root).Lstat", Method, 24, ""}, + {"(*Root).Mkdir", Method, 24, ""}, + {"(*Root).Name", Method, 24, ""}, + {"(*Root).Open", Method, 24, ""}, + {"(*Root).OpenFile", Method, 24, ""}, + {"(*Root).OpenRoot", Method, 24, ""}, + {"(*Root).Readlink", Method, 25, ""}, + {"(*Root).Remove", Method, 24, ""}, + {"(*Root).Rename", Method, 25, ""}, + {"(*Root).Stat", Method, 24, ""}, + {"(*Root).Symlink", Method, 25, ""}, + {"(*SyscallError).Error", Method, 0, ""}, + {"(*SyscallError).Timeout", Method, 10, ""}, + {"(*SyscallError).Unwrap", Method, 13, ""}, + {"(FileMode).IsDir", Method, 0, ""}, + {"(FileMode).IsRegular", Method, 1, ""}, + {"(FileMode).Perm", Method, 0, ""}, + {"(FileMode).String", Method, 0, ""}, + {"Args", Var, 0, ""}, + {"Chdir", Func, 0, "func(dir string) error"}, + {"Chmod", Func, 0, "func(name string, mode FileMode) error"}, + {"Chown", Func, 0, "func(name string, uid int, gid int) error"}, + {"Chtimes", Func, 0, "func(name string, atime time.Time, mtime time.Time) error"}, + {"Clearenv", Func, 0, "func()"}, + {"CopyFS", Func, 23, "func(dir string, fsys fs.FS) error"}, + {"Create", Func, 0, "func(name string) (*File, error)"}, + {"CreateTemp", Func, 16, "func(dir string, pattern string) (*File, error)"}, + {"DevNull", Const, 0, ""}, + {"DirEntry", Type, 16, ""}, + {"DirFS", Func, 16, "func(dir string) fs.FS"}, + {"Environ", Func, 0, "func() []string"}, + {"ErrClosed", Var, 8, ""}, + {"ErrDeadlineExceeded", Var, 15, ""}, + {"ErrExist", Var, 0, ""}, + {"ErrInvalid", Var, 0, ""}, + {"ErrNoDeadline", Var, 10, ""}, + {"ErrNotExist", Var, 0, ""}, + {"ErrPermission", Var, 0, ""}, + {"ErrProcessDone", Var, 16, ""}, + {"Executable", Func, 8, "func() (string, error)"}, + {"Exit", Func, 0, "func(code int)"}, + {"Expand", Func, 0, "func(s string, mapping func(string) string) string"}, + {"ExpandEnv", Func, 0, "func(s string) string"}, + {"File", Type, 0, ""}, + {"FileInfo", Type, 0, ""}, + {"FileMode", Type, 0, ""}, + {"FindProcess", Func, 0, "func(pid int) (*Process, error)"}, + {"Getegid", Func, 
0, "func() int"}, + {"Getenv", Func, 0, "func(key string) string"}, + {"Geteuid", Func, 0, "func() int"}, + {"Getgid", Func, 0, "func() int"}, + {"Getgroups", Func, 0, "func() ([]int, error)"}, + {"Getpagesize", Func, 0, "func() int"}, + {"Getpid", Func, 0, "func() int"}, + {"Getppid", Func, 0, "func() int"}, + {"Getuid", Func, 0, "func() int"}, + {"Getwd", Func, 0, "func() (dir string, err error)"}, + {"Hostname", Func, 0, "func() (name string, err error)"}, + {"Interrupt", Var, 0, ""}, + {"IsExist", Func, 0, "func(err error) bool"}, + {"IsNotExist", Func, 0, "func(err error) bool"}, + {"IsPathSeparator", Func, 0, "func(c uint8) bool"}, + {"IsPermission", Func, 0, "func(err error) bool"}, + {"IsTimeout", Func, 10, "func(err error) bool"}, + {"Kill", Var, 0, ""}, + {"Lchown", Func, 0, "func(name string, uid int, gid int) error"}, + {"Link", Func, 0, "func(oldname string, newname string) error"}, + {"LinkError", Type, 0, ""}, + {"LinkError.Err", Field, 0, ""}, + {"LinkError.New", Field, 0, ""}, + {"LinkError.Old", Field, 0, ""}, + {"LinkError.Op", Field, 0, ""}, + {"LookupEnv", Func, 5, "func(key string) (string, bool)"}, + {"Lstat", Func, 0, "func(name string) (FileInfo, error)"}, + {"Mkdir", Func, 0, "func(name string, perm FileMode) error"}, + {"MkdirAll", Func, 0, "func(path string, perm FileMode) error"}, + {"MkdirTemp", Func, 16, "func(dir string, pattern string) (string, error)"}, + {"ModeAppend", Const, 0, ""}, + {"ModeCharDevice", Const, 0, ""}, + {"ModeDevice", Const, 0, ""}, + {"ModeDir", Const, 0, ""}, + {"ModeExclusive", Const, 0, ""}, + {"ModeIrregular", Const, 11, ""}, + {"ModeNamedPipe", Const, 0, ""}, + {"ModePerm", Const, 0, ""}, + {"ModeSetgid", Const, 0, ""}, + {"ModeSetuid", Const, 0, ""}, + {"ModeSocket", Const, 0, ""}, + {"ModeSticky", Const, 0, ""}, + {"ModeSymlink", Const, 0, ""}, + {"ModeTemporary", Const, 0, ""}, + {"ModeType", Const, 0, ""}, + {"NewFile", Func, 0, "func(fd uintptr, name string) *File"}, + {"NewSyscallError", Func, 0, "func(syscall string, err error) error"}, + {"O_APPEND", Const, 0, ""}, + {"O_CREATE", Const, 0, ""}, + {"O_EXCL", Const, 0, ""}, + {"O_RDONLY", Const, 0, ""}, + {"O_RDWR", Const, 0, ""}, + {"O_SYNC", Const, 0, ""}, + {"O_TRUNC", Const, 0, ""}, + {"O_WRONLY", Const, 0, ""}, + {"Open", Func, 0, "func(name string) (*File, error)"}, + {"OpenFile", Func, 0, "func(name string, flag int, perm FileMode) (*File, error)"}, + {"OpenInRoot", Func, 24, "func(dir string, name string) (*File, error)"}, + {"OpenRoot", Func, 24, "func(name string) (*Root, error)"}, + {"PathError", Type, 0, ""}, + {"PathError.Err", Field, 0, ""}, + {"PathError.Op", Field, 0, ""}, + {"PathError.Path", Field, 0, ""}, + {"PathListSeparator", Const, 0, ""}, + {"PathSeparator", Const, 0, ""}, + {"Pipe", Func, 0, "func() (r *File, w *File, err error)"}, + {"ProcAttr", Type, 0, ""}, + {"ProcAttr.Dir", Field, 0, ""}, + {"ProcAttr.Env", Field, 0, ""}, + {"ProcAttr.Files", Field, 0, ""}, + {"ProcAttr.Sys", Field, 0, ""}, + {"Process", Type, 0, ""}, + {"Process.Pid", Field, 0, ""}, + {"ProcessState", Type, 0, ""}, + {"ReadDir", Func, 16, "func(name string) ([]DirEntry, error)"}, + {"ReadFile", Func, 16, "func(name string) ([]byte, error)"}, + {"Readlink", Func, 0, "func(name string) (string, error)"}, + {"Remove", Func, 0, "func(name string) error"}, + {"RemoveAll", Func, 0, "func(path string) error"}, + {"Rename", Func, 0, "func(oldpath string, newpath string) error"}, + {"Root", Type, 24, ""}, + {"SEEK_CUR", Const, 0, ""}, + {"SEEK_END", Const, 0, ""}, + {"SEEK_SET", Const, 
0, ""}, + {"SameFile", Func, 0, "func(fi1 FileInfo, fi2 FileInfo) bool"}, + {"Setenv", Func, 0, "func(key string, value string) error"}, + {"Signal", Type, 0, ""}, + {"StartProcess", Func, 0, "func(name string, argv []string, attr *ProcAttr) (*Process, error)"}, + {"Stat", Func, 0, "func(name string) (FileInfo, error)"}, + {"Stderr", Var, 0, ""}, + {"Stdin", Var, 0, ""}, + {"Stdout", Var, 0, ""}, + {"Symlink", Func, 0, "func(oldname string, newname string) error"}, + {"SyscallError", Type, 0, ""}, + {"SyscallError.Err", Field, 0, ""}, + {"SyscallError.Syscall", Field, 0, ""}, + {"TempDir", Func, 0, "func() string"}, + {"Truncate", Func, 0, "func(name string, size int64) error"}, + {"Unsetenv", Func, 4, "func(key string) error"}, + {"UserCacheDir", Func, 11, "func() (string, error)"}, + {"UserConfigDir", Func, 13, "func() (string, error)"}, + {"UserHomeDir", Func, 12, "func() (string, error)"}, + {"WriteFile", Func, 16, "func(name string, data []byte, perm FileMode) error"}, + }, + "os/exec": { + {"(*Cmd).CombinedOutput", Method, 0, ""}, + {"(*Cmd).Environ", Method, 19, ""}, + {"(*Cmd).Output", Method, 0, ""}, + {"(*Cmd).Run", Method, 0, ""}, + {"(*Cmd).Start", Method, 0, ""}, + {"(*Cmd).StderrPipe", Method, 0, ""}, + {"(*Cmd).StdinPipe", Method, 0, ""}, + {"(*Cmd).StdoutPipe", Method, 0, ""}, + {"(*Cmd).String", Method, 13, ""}, + {"(*Cmd).Wait", Method, 0, ""}, + {"(*Error).Error", Method, 0, ""}, + {"(*Error).Unwrap", Method, 13, ""}, + {"(*ExitError).Error", Method, 0, ""}, + {"(ExitError).ExitCode", Method, 12, ""}, + {"(ExitError).Exited", Method, 0, ""}, + {"(ExitError).Pid", Method, 0, ""}, + {"(ExitError).String", Method, 0, ""}, + {"(ExitError).Success", Method, 0, ""}, + {"(ExitError).Sys", Method, 0, ""}, + {"(ExitError).SysUsage", Method, 0, ""}, + {"(ExitError).SystemTime", Method, 0, ""}, + {"(ExitError).UserTime", Method, 0, ""}, + {"Cmd", Type, 0, ""}, + {"Cmd.Args", Field, 0, ""}, + {"Cmd.Cancel", Field, 20, ""}, + {"Cmd.Dir", Field, 0, ""}, + {"Cmd.Env", Field, 0, ""}, + {"Cmd.Err", Field, 19, ""}, + {"Cmd.ExtraFiles", Field, 0, ""}, + {"Cmd.Path", Field, 0, ""}, + {"Cmd.Process", Field, 0, ""}, + {"Cmd.ProcessState", Field, 0, ""}, + {"Cmd.Stderr", Field, 0, ""}, + {"Cmd.Stdin", Field, 0, ""}, + {"Cmd.Stdout", Field, 0, ""}, + {"Cmd.SysProcAttr", Field, 0, ""}, + {"Cmd.WaitDelay", Field, 20, ""}, + {"Command", Func, 0, "func(name string, arg ...string) *Cmd"}, + {"CommandContext", Func, 7, "func(ctx context.Context, name string, arg ...string) *Cmd"}, + {"ErrDot", Var, 19, ""}, + {"ErrNotFound", Var, 0, ""}, + {"ErrWaitDelay", Var, 20, ""}, + {"Error", Type, 0, ""}, + {"Error.Err", Field, 0, ""}, + {"Error.Name", Field, 0, ""}, + {"ExitError", Type, 0, ""}, + {"ExitError.ProcessState", Field, 0, ""}, + {"ExitError.Stderr", Field, 6, ""}, + {"LookPath", Func, 0, "func(file string) (string, error)"}, + }, + "os/signal": { + {"Ignore", Func, 5, "func(sig ...os.Signal)"}, + {"Ignored", Func, 11, "func(sig os.Signal) bool"}, + {"Notify", Func, 0, "func(c chan<- os.Signal, sig ...os.Signal)"}, + {"NotifyContext", Func, 16, "func(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc)"}, + {"Reset", Func, 5, "func(sig ...os.Signal)"}, + {"Stop", Func, 1, "func(c chan<- os.Signal)"}, + }, + "os/user": { + {"(*User).GroupIds", Method, 7, ""}, + {"(UnknownGroupError).Error", Method, 7, ""}, + {"(UnknownGroupIdError).Error", Method, 7, ""}, + {"(UnknownUserError).Error", Method, 0, ""}, + {"(UnknownUserIdError).Error", Method, 0, ""}, + 
{"Current", Func, 0, "func() (*User, error)"}, + {"Group", Type, 7, ""}, + {"Group.Gid", Field, 7, ""}, + {"Group.Name", Field, 7, ""}, + {"Lookup", Func, 0, "func(username string) (*User, error)"}, + {"LookupGroup", Func, 7, "func(name string) (*Group, error)"}, + {"LookupGroupId", Func, 7, "func(gid string) (*Group, error)"}, + {"LookupId", Func, 0, "func(uid string) (*User, error)"}, + {"UnknownGroupError", Type, 7, ""}, + {"UnknownGroupIdError", Type, 7, ""}, + {"UnknownUserError", Type, 0, ""}, + {"UnknownUserIdError", Type, 0, ""}, + {"User", Type, 0, ""}, + {"User.Gid", Field, 0, ""}, + {"User.HomeDir", Field, 0, ""}, + {"User.Name", Field, 0, ""}, + {"User.Uid", Field, 0, ""}, + {"User.Username", Field, 0, ""}, + }, + "path": { + {"Base", Func, 0, "func(path string) string"}, + {"Clean", Func, 0, "func(path string) string"}, + {"Dir", Func, 0, "func(path string) string"}, + {"ErrBadPattern", Var, 0, ""}, + {"Ext", Func, 0, "func(path string) string"}, + {"IsAbs", Func, 0, "func(path string) bool"}, + {"Join", Func, 0, "func(elem ...string) string"}, + {"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"}, + {"Split", Func, 0, "func(path string) (dir string, file string)"}, + }, + "path/filepath": { + {"Abs", Func, 0, "func(path string) (string, error)"}, + {"Base", Func, 0, "func(path string) string"}, + {"Clean", Func, 0, "func(path string) string"}, + {"Dir", Func, 0, "func(path string) string"}, + {"ErrBadPattern", Var, 0, ""}, + {"EvalSymlinks", Func, 0, "func(path string) (string, error)"}, + {"Ext", Func, 0, "func(path string) string"}, + {"FromSlash", Func, 0, "func(path string) string"}, + {"Glob", Func, 0, "func(pattern string) (matches []string, err error)"}, + {"HasPrefix", Func, 0, "func(p string, prefix string) bool"}, + {"IsAbs", Func, 0, "func(path string) bool"}, + {"IsLocal", Func, 20, "func(path string) bool"}, + {"Join", Func, 0, "func(elem ...string) string"}, + {"ListSeparator", Const, 0, ""}, + {"Localize", Func, 23, "func(path string) (string, error)"}, + {"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"}, + {"Rel", Func, 0, "func(basepath string, targpath string) (string, error)"}, + {"Separator", Const, 0, ""}, + {"SkipAll", Var, 20, ""}, + {"SkipDir", Var, 0, ""}, + {"Split", Func, 0, "func(path string) (dir string, file string)"}, + {"SplitList", Func, 0, "func(path string) []string"}, + {"ToSlash", Func, 0, "func(path string) string"}, + {"VolumeName", Func, 0, "func(path string) string"}, + {"Walk", Func, 0, "func(root string, fn WalkFunc) error"}, + {"WalkDir", Func, 16, "func(root string, fn fs.WalkDirFunc) error"}, + {"WalkFunc", Type, 0, ""}, + }, + "plugin": { + {"(*Plugin).Lookup", Method, 8, ""}, + {"Open", Func, 8, "func(path string) (*Plugin, error)"}, + {"Plugin", Type, 8, ""}, + {"Symbol", Type, 8, ""}, + }, + "reflect": { + {"(*MapIter).Key", Method, 12, ""}, + {"(*MapIter).Next", Method, 12, ""}, + {"(*MapIter).Reset", Method, 18, ""}, + {"(*MapIter).Value", Method, 12, ""}, + {"(*ValueError).Error", Method, 0, ""}, + {"(ChanDir).String", Method, 0, ""}, + {"(Kind).String", Method, 0, ""}, + {"(Method).IsExported", Method, 17, ""}, + {"(StructField).IsExported", Method, 17, ""}, + {"(StructTag).Get", Method, 0, ""}, + {"(StructTag).Lookup", Method, 7, ""}, + {"(Value).Addr", Method, 0, ""}, + {"(Value).Bool", Method, 0, ""}, + {"(Value).Bytes", Method, 0, ""}, + {"(Value).Call", Method, 0, ""}, + {"(Value).CallSlice", Method, 0, ""}, + {"(Value).CanAddr", Method, 0, ""}, + 
{"(Value).CanComplex", Method, 18, ""}, + {"(Value).CanConvert", Method, 17, ""}, + {"(Value).CanFloat", Method, 18, ""}, + {"(Value).CanInt", Method, 18, ""}, + {"(Value).CanInterface", Method, 0, ""}, + {"(Value).CanSet", Method, 0, ""}, + {"(Value).CanUint", Method, 18, ""}, + {"(Value).Cap", Method, 0, ""}, + {"(Value).Clear", Method, 21, ""}, + {"(Value).Close", Method, 0, ""}, + {"(Value).Comparable", Method, 20, ""}, + {"(Value).Complex", Method, 0, ""}, + {"(Value).Convert", Method, 1, ""}, + {"(Value).Elem", Method, 0, ""}, + {"(Value).Equal", Method, 20, ""}, + {"(Value).Field", Method, 0, ""}, + {"(Value).FieldByIndex", Method, 0, ""}, + {"(Value).FieldByIndexErr", Method, 18, ""}, + {"(Value).FieldByName", Method, 0, ""}, + {"(Value).FieldByNameFunc", Method, 0, ""}, + {"(Value).Float", Method, 0, ""}, + {"(Value).Grow", Method, 20, ""}, + {"(Value).Index", Method, 0, ""}, + {"(Value).Int", Method, 0, ""}, + {"(Value).Interface", Method, 0, ""}, + {"(Value).InterfaceData", Method, 0, ""}, + {"(Value).IsNil", Method, 0, ""}, + {"(Value).IsValid", Method, 0, ""}, + {"(Value).IsZero", Method, 13, ""}, + {"(Value).Kind", Method, 0, ""}, + {"(Value).Len", Method, 0, ""}, + {"(Value).MapIndex", Method, 0, ""}, + {"(Value).MapKeys", Method, 0, ""}, + {"(Value).MapRange", Method, 12, ""}, + {"(Value).Method", Method, 0, ""}, + {"(Value).MethodByName", Method, 0, ""}, + {"(Value).NumField", Method, 0, ""}, + {"(Value).NumMethod", Method, 0, ""}, + {"(Value).OverflowComplex", Method, 0, ""}, + {"(Value).OverflowFloat", Method, 0, ""}, + {"(Value).OverflowInt", Method, 0, ""}, + {"(Value).OverflowUint", Method, 0, ""}, + {"(Value).Pointer", Method, 0, ""}, + {"(Value).Recv", Method, 0, ""}, + {"(Value).Send", Method, 0, ""}, + {"(Value).Seq", Method, 23, ""}, + {"(Value).Seq2", Method, 23, ""}, + {"(Value).Set", Method, 0, ""}, + {"(Value).SetBool", Method, 0, ""}, + {"(Value).SetBytes", Method, 0, ""}, + {"(Value).SetCap", Method, 2, ""}, + {"(Value).SetComplex", Method, 0, ""}, + {"(Value).SetFloat", Method, 0, ""}, + {"(Value).SetInt", Method, 0, ""}, + {"(Value).SetIterKey", Method, 18, ""}, + {"(Value).SetIterValue", Method, 18, ""}, + {"(Value).SetLen", Method, 0, ""}, + {"(Value).SetMapIndex", Method, 0, ""}, + {"(Value).SetPointer", Method, 0, ""}, + {"(Value).SetString", Method, 0, ""}, + {"(Value).SetUint", Method, 0, ""}, + {"(Value).SetZero", Method, 20, ""}, + {"(Value).Slice", Method, 0, ""}, + {"(Value).Slice3", Method, 2, ""}, + {"(Value).String", Method, 0, ""}, + {"(Value).TryRecv", Method, 0, ""}, + {"(Value).TrySend", Method, 0, ""}, + {"(Value).Type", Method, 0, ""}, + {"(Value).Uint", Method, 0, ""}, + {"(Value).UnsafeAddr", Method, 0, ""}, + {"(Value).UnsafePointer", Method, 18, ""}, + {"Append", Func, 0, "func(s Value, x ...Value) Value"}, + {"AppendSlice", Func, 0, "func(s Value, t Value) Value"}, + {"Array", Const, 0, ""}, + {"ArrayOf", Func, 5, "func(length int, elem Type) Type"}, + {"Bool", Const, 0, ""}, + {"BothDir", Const, 0, ""}, + {"Chan", Const, 0, ""}, + {"ChanDir", Type, 0, ""}, + {"ChanOf", Func, 1, "func(dir ChanDir, t Type) Type"}, + {"Complex128", Const, 0, ""}, + {"Complex64", Const, 0, ""}, + {"Copy", Func, 0, "func(dst Value, src Value) int"}, + {"DeepEqual", Func, 0, "func(x any, y any) bool"}, + {"Float32", Const, 0, ""}, + {"Float64", Const, 0, ""}, + {"Func", Const, 0, ""}, + {"FuncOf", Func, 5, "func(in []Type, out []Type, variadic bool) Type"}, + {"Indirect", Func, 0, "func(v Value) Value"}, + {"Int", Const, 0, ""}, + {"Int16", Const, 0, 
""}, + {"Int32", Const, 0, ""}, + {"Int64", Const, 0, ""}, + {"Int8", Const, 0, ""}, + {"Interface", Const, 0, ""}, + {"Invalid", Const, 0, ""}, + {"Kind", Type, 0, ""}, + {"MakeChan", Func, 0, "func(typ Type, buffer int) Value"}, + {"MakeFunc", Func, 1, "func(typ Type, fn func(args []Value) (results []Value)) Value"}, + {"MakeMap", Func, 0, "func(typ Type) Value"}, + {"MakeMapWithSize", Func, 9, "func(typ Type, n int) Value"}, + {"MakeSlice", Func, 0, "func(typ Type, len int, cap int) Value"}, + {"Map", Const, 0, ""}, + {"MapIter", Type, 12, ""}, + {"MapOf", Func, 1, "func(key Type, elem Type) Type"}, + {"Method", Type, 0, ""}, + {"Method.Func", Field, 0, ""}, + {"Method.Index", Field, 0, ""}, + {"Method.Name", Field, 0, ""}, + {"Method.PkgPath", Field, 0, ""}, + {"Method.Type", Field, 0, ""}, + {"New", Func, 0, "func(typ Type) Value"}, + {"NewAt", Func, 0, "func(typ Type, p unsafe.Pointer) Value"}, + {"Pointer", Const, 18, ""}, + {"PointerTo", Func, 18, "func(t Type) Type"}, + {"Ptr", Const, 0, ""}, + {"PtrTo", Func, 0, "func(t Type) Type"}, + {"RecvDir", Const, 0, ""}, + {"Select", Func, 1, "func(cases []SelectCase) (chosen int, recv Value, recvOK bool)"}, + {"SelectCase", Type, 1, ""}, + {"SelectCase.Chan", Field, 1, ""}, + {"SelectCase.Dir", Field, 1, ""}, + {"SelectCase.Send", Field, 1, ""}, + {"SelectDefault", Const, 1, ""}, + {"SelectDir", Type, 1, ""}, + {"SelectRecv", Const, 1, ""}, + {"SelectSend", Const, 1, ""}, + {"SendDir", Const, 0, ""}, + {"Slice", Const, 0, ""}, + {"SliceAt", Func, 23, "func(typ Type, p unsafe.Pointer, n int) Value"}, + {"SliceHeader", Type, 0, ""}, + {"SliceHeader.Cap", Field, 0, ""}, + {"SliceHeader.Data", Field, 0, ""}, + {"SliceHeader.Len", Field, 0, ""}, + {"SliceOf", Func, 1, "func(t Type) Type"}, + {"String", Const, 0, ""}, + {"StringHeader", Type, 0, ""}, + {"StringHeader.Data", Field, 0, ""}, + {"StringHeader.Len", Field, 0, ""}, + {"Struct", Const, 0, ""}, + {"StructField", Type, 0, ""}, + {"StructField.Anonymous", Field, 0, ""}, + {"StructField.Index", Field, 0, ""}, + {"StructField.Name", Field, 0, ""}, + {"StructField.Offset", Field, 0, ""}, + {"StructField.PkgPath", Field, 0, ""}, + {"StructField.Tag", Field, 0, ""}, + {"StructField.Type", Field, 0, ""}, + {"StructOf", Func, 7, "func(fields []StructField) Type"}, + {"StructTag", Type, 0, ""}, + {"Swapper", Func, 8, "func(slice any) func(i int, j int)"}, + {"Type", Type, 0, ""}, + {"TypeFor", Func, 22, "func[T any]() Type"}, + {"TypeOf", Func, 0, "func(i any) Type"}, + {"Uint", Const, 0, ""}, + {"Uint16", Const, 0, ""}, + {"Uint32", Const, 0, ""}, + {"Uint64", Const, 0, ""}, + {"Uint8", Const, 0, ""}, + {"Uintptr", Const, 0, ""}, + {"UnsafePointer", Const, 0, ""}, + {"Value", Type, 0, ""}, + {"ValueError", Type, 0, ""}, + {"ValueError.Kind", Field, 0, ""}, + {"ValueError.Method", Field, 0, ""}, + {"ValueOf", Func, 0, "func(i any) Value"}, + {"VisibleFields", Func, 17, "func(t Type) []StructField"}, + {"Zero", Func, 0, "func(typ Type) Value"}, + }, + "regexp": { + {"(*Regexp).AppendText", Method, 24, ""}, + {"(*Regexp).Copy", Method, 6, ""}, + {"(*Regexp).Expand", Method, 0, ""}, + {"(*Regexp).ExpandString", Method, 0, ""}, + {"(*Regexp).Find", Method, 0, ""}, + {"(*Regexp).FindAll", Method, 0, ""}, + {"(*Regexp).FindAllIndex", Method, 0, ""}, + {"(*Regexp).FindAllString", Method, 0, ""}, + {"(*Regexp).FindAllStringIndex", Method, 0, ""}, + {"(*Regexp).FindAllStringSubmatch", Method, 0, ""}, + {"(*Regexp).FindAllStringSubmatchIndex", Method, 0, ""}, + {"(*Regexp).FindAllSubmatch", Method, 0, 
""}, + {"(*Regexp).FindAllSubmatchIndex", Method, 0, ""}, + {"(*Regexp).FindIndex", Method, 0, ""}, + {"(*Regexp).FindReaderIndex", Method, 0, ""}, + {"(*Regexp).FindReaderSubmatchIndex", Method, 0, ""}, + {"(*Regexp).FindString", Method, 0, ""}, + {"(*Regexp).FindStringIndex", Method, 0, ""}, + {"(*Regexp).FindStringSubmatch", Method, 0, ""}, + {"(*Regexp).FindStringSubmatchIndex", Method, 0, ""}, + {"(*Regexp).FindSubmatch", Method, 0, ""}, + {"(*Regexp).FindSubmatchIndex", Method, 0, ""}, + {"(*Regexp).LiteralPrefix", Method, 0, ""}, + {"(*Regexp).Longest", Method, 1, ""}, + {"(*Regexp).MarshalText", Method, 21, ""}, + {"(*Regexp).Match", Method, 0, ""}, + {"(*Regexp).MatchReader", Method, 0, ""}, + {"(*Regexp).MatchString", Method, 0, ""}, + {"(*Regexp).NumSubexp", Method, 0, ""}, + {"(*Regexp).ReplaceAll", Method, 0, ""}, + {"(*Regexp).ReplaceAllFunc", Method, 0, ""}, + {"(*Regexp).ReplaceAllLiteral", Method, 0, ""}, + {"(*Regexp).ReplaceAllLiteralString", Method, 0, ""}, + {"(*Regexp).ReplaceAllString", Method, 0, ""}, + {"(*Regexp).ReplaceAllStringFunc", Method, 0, ""}, + {"(*Regexp).Split", Method, 1, ""}, + {"(*Regexp).String", Method, 0, ""}, + {"(*Regexp).SubexpIndex", Method, 15, ""}, + {"(*Regexp).SubexpNames", Method, 0, ""}, + {"(*Regexp).UnmarshalText", Method, 21, ""}, + {"Compile", Func, 0, "func(expr string) (*Regexp, error)"}, + {"CompilePOSIX", Func, 0, "func(expr string) (*Regexp, error)"}, + {"Match", Func, 0, "func(pattern string, b []byte) (matched bool, err error)"}, + {"MatchReader", Func, 0, "func(pattern string, r io.RuneReader) (matched bool, err error)"}, + {"MatchString", Func, 0, "func(pattern string, s string) (matched bool, err error)"}, + {"MustCompile", Func, 0, "func(str string) *Regexp"}, + {"MustCompilePOSIX", Func, 0, "func(str string) *Regexp"}, + {"QuoteMeta", Func, 0, "func(s string) string"}, + {"Regexp", Type, 0, ""}, + }, + "regexp/syntax": { + {"(*Error).Error", Method, 0, ""}, + {"(*Inst).MatchEmptyWidth", Method, 0, ""}, + {"(*Inst).MatchRune", Method, 0, ""}, + {"(*Inst).MatchRunePos", Method, 3, ""}, + {"(*Inst).String", Method, 0, ""}, + {"(*Prog).Prefix", Method, 0, ""}, + {"(*Prog).StartCond", Method, 0, ""}, + {"(*Prog).String", Method, 0, ""}, + {"(*Regexp).CapNames", Method, 0, ""}, + {"(*Regexp).Equal", Method, 0, ""}, + {"(*Regexp).MaxCap", Method, 0, ""}, + {"(*Regexp).Simplify", Method, 0, ""}, + {"(*Regexp).String", Method, 0, ""}, + {"(ErrorCode).String", Method, 0, ""}, + {"(InstOp).String", Method, 3, ""}, + {"(Op).String", Method, 11, ""}, + {"ClassNL", Const, 0, ""}, + {"Compile", Func, 0, "func(re *Regexp) (*Prog, error)"}, + {"DotNL", Const, 0, ""}, + {"EmptyBeginLine", Const, 0, ""}, + {"EmptyBeginText", Const, 0, ""}, + {"EmptyEndLine", Const, 0, ""}, + {"EmptyEndText", Const, 0, ""}, + {"EmptyNoWordBoundary", Const, 0, ""}, + {"EmptyOp", Type, 0, ""}, + {"EmptyOpContext", Func, 0, "func(r1 rune, r2 rune) EmptyOp"}, + {"EmptyWordBoundary", Const, 0, ""}, + {"ErrInternalError", Const, 0, ""}, + {"ErrInvalidCharClass", Const, 0, ""}, + {"ErrInvalidCharRange", Const, 0, ""}, + {"ErrInvalidEscape", Const, 0, ""}, + {"ErrInvalidNamedCapture", Const, 0, ""}, + {"ErrInvalidPerlOp", Const, 0, ""}, + {"ErrInvalidRepeatOp", Const, 0, ""}, + {"ErrInvalidRepeatSize", Const, 0, ""}, + {"ErrInvalidUTF8", Const, 0, ""}, + {"ErrLarge", Const, 20, ""}, + {"ErrMissingBracket", Const, 0, ""}, + {"ErrMissingParen", Const, 0, ""}, + {"ErrMissingRepeatArgument", Const, 0, ""}, + {"ErrNestingDepth", Const, 19, ""}, + {"ErrTrailingBackslash", 
Const, 0, ""}, + {"ErrUnexpectedParen", Const, 1, ""}, + {"Error", Type, 0, ""}, + {"Error.Code", Field, 0, ""}, + {"Error.Expr", Field, 0, ""}, + {"ErrorCode", Type, 0, ""}, + {"Flags", Type, 0, ""}, + {"FoldCase", Const, 0, ""}, + {"Inst", Type, 0, ""}, + {"Inst.Arg", Field, 0, ""}, + {"Inst.Op", Field, 0, ""}, + {"Inst.Out", Field, 0, ""}, + {"Inst.Rune", Field, 0, ""}, + {"InstAlt", Const, 0, ""}, + {"InstAltMatch", Const, 0, ""}, + {"InstCapture", Const, 0, ""}, + {"InstEmptyWidth", Const, 0, ""}, + {"InstFail", Const, 0, ""}, + {"InstMatch", Const, 0, ""}, + {"InstNop", Const, 0, ""}, + {"InstOp", Type, 0, ""}, + {"InstRune", Const, 0, ""}, + {"InstRune1", Const, 0, ""}, + {"InstRuneAny", Const, 0, ""}, + {"InstRuneAnyNotNL", Const, 0, ""}, + {"IsWordChar", Func, 0, "func(r rune) bool"}, + {"Literal", Const, 0, ""}, + {"MatchNL", Const, 0, ""}, + {"NonGreedy", Const, 0, ""}, + {"OneLine", Const, 0, ""}, + {"Op", Type, 0, ""}, + {"OpAlternate", Const, 0, ""}, + {"OpAnyChar", Const, 0, ""}, + {"OpAnyCharNotNL", Const, 0, ""}, + {"OpBeginLine", Const, 0, ""}, + {"OpBeginText", Const, 0, ""}, + {"OpCapture", Const, 0, ""}, + {"OpCharClass", Const, 0, ""}, + {"OpConcat", Const, 0, ""}, + {"OpEmptyMatch", Const, 0, ""}, + {"OpEndLine", Const, 0, ""}, + {"OpEndText", Const, 0, ""}, + {"OpLiteral", Const, 0, ""}, + {"OpNoMatch", Const, 0, ""}, + {"OpNoWordBoundary", Const, 0, ""}, + {"OpPlus", Const, 0, ""}, + {"OpQuest", Const, 0, ""}, + {"OpRepeat", Const, 0, ""}, + {"OpStar", Const, 0, ""}, + {"OpWordBoundary", Const, 0, ""}, + {"POSIX", Const, 0, ""}, + {"Parse", Func, 0, "func(s string, flags Flags) (*Regexp, error)"}, + {"Perl", Const, 0, ""}, + {"PerlX", Const, 0, ""}, + {"Prog", Type, 0, ""}, + {"Prog.Inst", Field, 0, ""}, + {"Prog.NumCap", Field, 0, ""}, + {"Prog.Start", Field, 0, ""}, + {"Regexp", Type, 0, ""}, + {"Regexp.Cap", Field, 0, ""}, + {"Regexp.Flags", Field, 0, ""}, + {"Regexp.Max", Field, 0, ""}, + {"Regexp.Min", Field, 0, ""}, + {"Regexp.Name", Field, 0, ""}, + {"Regexp.Op", Field, 0, ""}, + {"Regexp.Rune", Field, 0, ""}, + {"Regexp.Rune0", Field, 0, ""}, + {"Regexp.Sub", Field, 0, ""}, + {"Regexp.Sub0", Field, 0, ""}, + {"Simple", Const, 0, ""}, + {"UnicodeGroups", Const, 0, ""}, + {"WasDollar", Const, 0, ""}, + }, + "runtime": { + {"(*BlockProfileRecord).Stack", Method, 1, ""}, + {"(*Frames).Next", Method, 7, ""}, + {"(*Func).Entry", Method, 0, ""}, + {"(*Func).FileLine", Method, 0, ""}, + {"(*Func).Name", Method, 0, ""}, + {"(*MemProfileRecord).InUseBytes", Method, 0, ""}, + {"(*MemProfileRecord).InUseObjects", Method, 0, ""}, + {"(*MemProfileRecord).Stack", Method, 0, ""}, + {"(*PanicNilError).Error", Method, 21, ""}, + {"(*PanicNilError).RuntimeError", Method, 21, ""}, + {"(*Pinner).Pin", Method, 21, ""}, + {"(*Pinner).Unpin", Method, 21, ""}, + {"(*StackRecord).Stack", Method, 0, ""}, + {"(*TypeAssertionError).Error", Method, 0, ""}, + {"(*TypeAssertionError).RuntimeError", Method, 0, ""}, + {"(Cleanup).Stop", Method, 24, ""}, + {"AddCleanup", Func, 24, "func[T, S any](ptr *T, cleanup func(S), arg S) Cleanup"}, + {"BlockProfile", Func, 1, "func(p []BlockProfileRecord) (n int, ok bool)"}, + {"BlockProfileRecord", Type, 1, ""}, + {"BlockProfileRecord.Count", Field, 1, ""}, + {"BlockProfileRecord.Cycles", Field, 1, ""}, + {"BlockProfileRecord.StackRecord", Field, 1, ""}, + {"Breakpoint", Func, 0, "func()"}, + {"CPUProfile", Func, 0, "func() []byte"}, + {"Caller", Func, 0, "func(skip int) (pc uintptr, file string, line int, ok bool)"}, + {"Callers", Func, 0, 
"func(skip int, pc []uintptr) int"}, + {"CallersFrames", Func, 7, "func(callers []uintptr) *Frames"}, + {"Cleanup", Type, 24, ""}, + {"Compiler", Const, 0, ""}, + {"Error", Type, 0, ""}, + {"Frame", Type, 7, ""}, + {"Frame.Entry", Field, 7, ""}, + {"Frame.File", Field, 7, ""}, + {"Frame.Func", Field, 7, ""}, + {"Frame.Function", Field, 7, ""}, + {"Frame.Line", Field, 7, ""}, + {"Frame.PC", Field, 7, ""}, + {"Frames", Type, 7, ""}, + {"Func", Type, 0, ""}, + {"FuncForPC", Func, 0, "func(pc uintptr) *Func"}, + {"GC", Func, 0, "func()"}, + {"GOARCH", Const, 0, ""}, + {"GOMAXPROCS", Func, 0, "func(n int) int"}, + {"GOOS", Const, 0, ""}, + {"GOROOT", Func, 0, "func() string"}, + {"Goexit", Func, 0, "func()"}, + {"GoroutineProfile", Func, 0, "func(p []StackRecord) (n int, ok bool)"}, + {"Gosched", Func, 0, "func()"}, + {"KeepAlive", Func, 7, "func(x any)"}, + {"LockOSThread", Func, 0, "func()"}, + {"MemProfile", Func, 0, "func(p []MemProfileRecord, inuseZero bool) (n int, ok bool)"}, + {"MemProfileRate", Var, 0, ""}, + {"MemProfileRecord", Type, 0, ""}, + {"MemProfileRecord.AllocBytes", Field, 0, ""}, + {"MemProfileRecord.AllocObjects", Field, 0, ""}, + {"MemProfileRecord.FreeBytes", Field, 0, ""}, + {"MemProfileRecord.FreeObjects", Field, 0, ""}, + {"MemProfileRecord.Stack0", Field, 0, ""}, + {"MemStats", Type, 0, ""}, + {"MemStats.Alloc", Field, 0, ""}, + {"MemStats.BuckHashSys", Field, 0, ""}, + {"MemStats.BySize", Field, 0, ""}, + {"MemStats.DebugGC", Field, 0, ""}, + {"MemStats.EnableGC", Field, 0, ""}, + {"MemStats.Frees", Field, 0, ""}, + {"MemStats.GCCPUFraction", Field, 5, ""}, + {"MemStats.GCSys", Field, 2, ""}, + {"MemStats.HeapAlloc", Field, 0, ""}, + {"MemStats.HeapIdle", Field, 0, ""}, + {"MemStats.HeapInuse", Field, 0, ""}, + {"MemStats.HeapObjects", Field, 0, ""}, + {"MemStats.HeapReleased", Field, 0, ""}, + {"MemStats.HeapSys", Field, 0, ""}, + {"MemStats.LastGC", Field, 0, ""}, + {"MemStats.Lookups", Field, 0, ""}, + {"MemStats.MCacheInuse", Field, 0, ""}, + {"MemStats.MCacheSys", Field, 0, ""}, + {"MemStats.MSpanInuse", Field, 0, ""}, + {"MemStats.MSpanSys", Field, 0, ""}, + {"MemStats.Mallocs", Field, 0, ""}, + {"MemStats.NextGC", Field, 0, ""}, + {"MemStats.NumForcedGC", Field, 8, ""}, + {"MemStats.NumGC", Field, 0, ""}, + {"MemStats.OtherSys", Field, 2, ""}, + {"MemStats.PauseEnd", Field, 4, ""}, + {"MemStats.PauseNs", Field, 0, ""}, + {"MemStats.PauseTotalNs", Field, 0, ""}, + {"MemStats.StackInuse", Field, 0, ""}, + {"MemStats.StackSys", Field, 0, ""}, + {"MemStats.Sys", Field, 0, ""}, + {"MemStats.TotalAlloc", Field, 0, ""}, + {"MutexProfile", Func, 8, "func(p []BlockProfileRecord) (n int, ok bool)"}, + {"NumCPU", Func, 0, "func() int"}, + {"NumCgoCall", Func, 0, "func() int64"}, + {"NumGoroutine", Func, 0, "func() int"}, + {"PanicNilError", Type, 21, ""}, + {"Pinner", Type, 21, ""}, + {"ReadMemStats", Func, 0, "func(m *MemStats)"}, + {"ReadTrace", Func, 5, "func() []byte"}, + {"SetBlockProfileRate", Func, 1, "func(rate int)"}, + {"SetCPUProfileRate", Func, 0, "func(hz int)"}, + {"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"}, + {"SetFinalizer", Func, 0, "func(obj any, finalizer any)"}, + {"SetMutexProfileFraction", Func, 8, "func(rate int) int"}, + {"Stack", Func, 0, "func(buf []byte, all bool) int"}, + {"StackRecord", Type, 0, ""}, + {"StackRecord.Stack0", Field, 0, ""}, + {"StartTrace", Func, 5, "func() error"}, + {"StopTrace", Func, 5, "func()"}, + {"ThreadCreateProfile", Func, 0, "func(p 
[]StackRecord) (n int, ok bool)"}, + {"TypeAssertionError", Type, 0, ""}, + {"UnlockOSThread", Func, 0, "func()"}, + {"Version", Func, 0, "func() string"}, + }, + "runtime/cgo": { + {"(Handle).Delete", Method, 17, ""}, + {"(Handle).Value", Method, 17, ""}, + {"Handle", Type, 17, ""}, + {"Incomplete", Type, 20, ""}, + {"NewHandle", Func, 17, ""}, + }, + "runtime/coverage": { + {"ClearCounters", Func, 20, "func() error"}, + {"WriteCounters", Func, 20, "func(w io.Writer) error"}, + {"WriteCountersDir", Func, 20, "func(dir string) error"}, + {"WriteMeta", Func, 20, "func(w io.Writer) error"}, + {"WriteMetaDir", Func, 20, "func(dir string) error"}, + }, + "runtime/debug": { + {"(*BuildInfo).String", Method, 18, ""}, + {"BuildInfo", Type, 12, ""}, + {"BuildInfo.Deps", Field, 12, ""}, + {"BuildInfo.GoVersion", Field, 18, ""}, + {"BuildInfo.Main", Field, 12, ""}, + {"BuildInfo.Path", Field, 12, ""}, + {"BuildInfo.Settings", Field, 18, ""}, + {"BuildSetting", Type, 18, ""}, + {"BuildSetting.Key", Field, 18, ""}, + {"BuildSetting.Value", Field, 18, ""}, + {"CrashOptions", Type, 23, ""}, + {"FreeOSMemory", Func, 1, "func()"}, + {"GCStats", Type, 1, ""}, + {"GCStats.LastGC", Field, 1, ""}, + {"GCStats.NumGC", Field, 1, ""}, + {"GCStats.Pause", Field, 1, ""}, + {"GCStats.PauseEnd", Field, 4, ""}, + {"GCStats.PauseQuantiles", Field, 1, ""}, + {"GCStats.PauseTotal", Field, 1, ""}, + {"Module", Type, 12, ""}, + {"Module.Path", Field, 12, ""}, + {"Module.Replace", Field, 12, ""}, + {"Module.Sum", Field, 12, ""}, + {"Module.Version", Field, 12, ""}, + {"ParseBuildInfo", Func, 18, "func(data string) (bi *BuildInfo, err error)"}, + {"PrintStack", Func, 0, "func()"}, + {"ReadBuildInfo", Func, 12, "func() (info *BuildInfo, ok bool)"}, + {"ReadGCStats", Func, 1, "func(stats *GCStats)"}, + {"SetCrashOutput", Func, 23, "func(f *os.File, opts CrashOptions) error"}, + {"SetGCPercent", Func, 1, "func(percent int) int"}, + {"SetMaxStack", Func, 2, "func(bytes int) int"}, + {"SetMaxThreads", Func, 2, "func(threads int) int"}, + {"SetMemoryLimit", Func, 19, "func(limit int64) int64"}, + {"SetPanicOnFault", Func, 3, "func(enabled bool) bool"}, + {"SetTraceback", Func, 6, "func(level string)"}, + {"Stack", Func, 0, "func() []byte"}, + {"WriteHeapDump", Func, 3, "func(fd uintptr)"}, + }, + "runtime/metrics": { + {"(Value).Float64", Method, 16, ""}, + {"(Value).Float64Histogram", Method, 16, ""}, + {"(Value).Kind", Method, 16, ""}, + {"(Value).Uint64", Method, 16, ""}, + {"All", Func, 16, "func() []Description"}, + {"Description", Type, 16, ""}, + {"Description.Cumulative", Field, 16, ""}, + {"Description.Description", Field, 16, ""}, + {"Description.Kind", Field, 16, ""}, + {"Description.Name", Field, 16, ""}, + {"Float64Histogram", Type, 16, ""}, + {"Float64Histogram.Buckets", Field, 16, ""}, + {"Float64Histogram.Counts", Field, 16, ""}, + {"KindBad", Const, 16, ""}, + {"KindFloat64", Const, 16, ""}, + {"KindFloat64Histogram", Const, 16, ""}, + {"KindUint64", Const, 16, ""}, + {"Read", Func, 16, "func(m []Sample)"}, + {"Sample", Type, 16, ""}, + {"Sample.Name", Field, 16, ""}, + {"Sample.Value", Field, 16, ""}, + {"Value", Type, 16, ""}, + {"ValueKind", Type, 16, ""}, + }, + "runtime/pprof": { + {"(*Profile).Add", Method, 0, ""}, + {"(*Profile).Count", Method, 0, ""}, + {"(*Profile).Name", Method, 0, ""}, + {"(*Profile).Remove", Method, 0, ""}, + {"(*Profile).WriteTo", Method, 0, ""}, + {"Do", Func, 9, "func(ctx context.Context, labels LabelSet, f func(context.Context))"}, + {"ForLabels", Func, 9, "func(ctx 
context.Context, f func(key string, value string) bool)"}, + {"Label", Func, 9, "func(ctx context.Context, key string) (string, bool)"}, + {"LabelSet", Type, 9, ""}, + {"Labels", Func, 9, "func(args ...string) LabelSet"}, + {"Lookup", Func, 0, "func(name string) *Profile"}, + {"NewProfile", Func, 0, "func(name string) *Profile"}, + {"Profile", Type, 0, ""}, + {"Profiles", Func, 0, "func() []*Profile"}, + {"SetGoroutineLabels", Func, 9, "func(ctx context.Context)"}, + {"StartCPUProfile", Func, 0, "func(w io.Writer) error"}, + {"StopCPUProfile", Func, 0, "func()"}, + {"WithLabels", Func, 9, "func(ctx context.Context, labels LabelSet) context.Context"}, + {"WriteHeapProfile", Func, 0, "func(w io.Writer) error"}, + }, + "runtime/trace": { + {"(*Region).End", Method, 11, ""}, + {"(*Task).End", Method, 11, ""}, + {"IsEnabled", Func, 11, "func() bool"}, + {"Log", Func, 11, "func(ctx context.Context, category string, message string)"}, + {"Logf", Func, 11, "func(ctx context.Context, category string, format string, args ...any)"}, + {"NewTask", Func, 11, "func(pctx context.Context, taskType string) (ctx context.Context, task *Task)"}, + {"Region", Type, 11, ""}, + {"Start", Func, 5, "func(w io.Writer) error"}, + {"StartRegion", Func, 11, "func(ctx context.Context, regionType string) *Region"}, + {"Stop", Func, 5, "func()"}, + {"Task", Type, 11, ""}, + {"WithRegion", Func, 11, "func(ctx context.Context, regionType string, fn func())"}, + }, + "slices": { + {"All", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq2[int, E]"}, + {"AppendSeq", Func, 23, "func[Slice ~[]E, E any](s Slice, seq iter.Seq[E]) Slice"}, + {"Backward", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq2[int, E]"}, + {"BinarySearch", Func, 21, "func[S ~[]E, E cmp.Ordered](x S, target E) (int, bool)"}, + {"BinarySearchFunc", Func, 21, "func[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool)"}, + {"Chunk", Func, 23, "func[Slice ~[]E, E any](s Slice, n int) iter.Seq[Slice]"}, + {"Clip", Func, 21, "func[S ~[]E, E any](s S) S"}, + {"Clone", Func, 21, "func[S ~[]E, E any](s S) S"}, + {"Collect", Func, 23, "func[E any](seq iter.Seq[E]) []E"}, + {"Compact", Func, 21, "func[S ~[]E, E comparable](s S) S"}, + {"CompactFunc", Func, 21, "func[S ~[]E, E any](s S, eq func(E, E) bool) S"}, + {"Compare", Func, 21, "func[S ~[]E, E cmp.Ordered](s1 S, s2 S) int"}, + {"CompareFunc", Func, 21, "func[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int"}, + {"Concat", Func, 22, "func[S ~[]E, E any](slices ...S) S"}, + {"Contains", Func, 21, "func[S ~[]E, E comparable](s S, v E) bool"}, + {"ContainsFunc", Func, 21, "func[S ~[]E, E any](s S, f func(E) bool) bool"}, + {"Delete", Func, 21, "func[S ~[]E, E any](s S, i int, j int) S"}, + {"DeleteFunc", Func, 21, "func[S ~[]E, E any](s S, del func(E) bool) S"}, + {"Equal", Func, 21, "func[S ~[]E, E comparable](s1 S, s2 S) bool"}, + {"EqualFunc", Func, 21, "func[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool"}, + {"Grow", Func, 21, "func[S ~[]E, E any](s S, n int) S"}, + {"Index", Func, 21, "func[S ~[]E, E comparable](s S, v E) int"}, + {"IndexFunc", Func, 21, "func[S ~[]E, E any](s S, f func(E) bool) int"}, + {"Insert", Func, 21, "func[S ~[]E, E any](s S, i int, v ...E) S"}, + {"IsSorted", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) bool"}, + {"IsSortedFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) bool"}, + {"Max", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) E"}, + {"MaxFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a 
E, b E) int) E"}, + {"Min", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) E"}, + {"MinFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) E"}, + {"Repeat", Func, 23, "func[S ~[]E, E any](x S, count int) S"}, + {"Replace", Func, 21, "func[S ~[]E, E any](s S, i int, j int, v ...E) S"}, + {"Reverse", Func, 21, "func[S ~[]E, E any](s S)"}, + {"Sort", Func, 21, "func[S ~[]E, E cmp.Ordered](x S)"}, + {"SortFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int)"}, + {"SortStableFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int)"}, + {"Sorted", Func, 23, "func[E cmp.Ordered](seq iter.Seq[E]) []E"}, + {"SortedFunc", Func, 23, "func[E any](seq iter.Seq[E], cmp func(E, E) int) []E"}, + {"SortedStableFunc", Func, 23, "func[E any](seq iter.Seq[E], cmp func(E, E) int) []E"}, + {"Values", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq[E]"}, + }, + "sort": { + {"(Float64Slice).Len", Method, 0, ""}, + {"(Float64Slice).Less", Method, 0, ""}, + {"(Float64Slice).Search", Method, 0, ""}, + {"(Float64Slice).Sort", Method, 0, ""}, + {"(Float64Slice).Swap", Method, 0, ""}, + {"(IntSlice).Len", Method, 0, ""}, + {"(IntSlice).Less", Method, 0, ""}, + {"(IntSlice).Search", Method, 0, ""}, + {"(IntSlice).Sort", Method, 0, ""}, + {"(IntSlice).Swap", Method, 0, ""}, + {"(StringSlice).Len", Method, 0, ""}, + {"(StringSlice).Less", Method, 0, ""}, + {"(StringSlice).Search", Method, 0, ""}, + {"(StringSlice).Sort", Method, 0, ""}, + {"(StringSlice).Swap", Method, 0, ""}, + {"Find", Func, 19, "func(n int, cmp func(int) int) (i int, found bool)"}, + {"Float64Slice", Type, 0, ""}, + {"Float64s", Func, 0, "func(x []float64)"}, + {"Float64sAreSorted", Func, 0, "func(x []float64) bool"}, + {"IntSlice", Type, 0, ""}, + {"Interface", Type, 0, ""}, + {"Ints", Func, 0, "func(x []int)"}, + {"IntsAreSorted", Func, 0, "func(x []int) bool"}, + {"IsSorted", Func, 0, "func(data Interface) bool"}, + {"Reverse", Func, 1, "func(data Interface) Interface"}, + {"Search", Func, 0, "func(n int, f func(int) bool) int"}, + {"SearchFloat64s", Func, 0, "func(a []float64, x float64) int"}, + {"SearchInts", Func, 0, "func(a []int, x int) int"}, + {"SearchStrings", Func, 0, "func(a []string, x string) int"}, + {"Slice", Func, 8, "func(x any, less func(i int, j int) bool)"}, + {"SliceIsSorted", Func, 8, "func(x any, less func(i int, j int) bool) bool"}, + {"SliceStable", Func, 8, "func(x any, less func(i int, j int) bool)"}, + {"Sort", Func, 0, "func(data Interface)"}, + {"Stable", Func, 2, "func(data Interface)"}, + {"StringSlice", Type, 0, ""}, + {"Strings", Func, 0, "func(x []string)"}, + {"StringsAreSorted", Func, 0, "func(x []string) bool"}, + }, + "strconv": { + {"(*NumError).Error", Method, 0, ""}, + {"(*NumError).Unwrap", Method, 14, ""}, + {"AppendBool", Func, 0, "func(dst []byte, b bool) []byte"}, + {"AppendFloat", Func, 0, "func(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte"}, + {"AppendInt", Func, 0, "func(dst []byte, i int64, base int) []byte"}, + {"AppendQuote", Func, 0, "func(dst []byte, s string) []byte"}, + {"AppendQuoteRune", Func, 0, "func(dst []byte, r rune) []byte"}, + {"AppendQuoteRuneToASCII", Func, 0, "func(dst []byte, r rune) []byte"}, + {"AppendQuoteRuneToGraphic", Func, 6, "func(dst []byte, r rune) []byte"}, + {"AppendQuoteToASCII", Func, 0, "func(dst []byte, s string) []byte"}, + {"AppendQuoteToGraphic", Func, 6, "func(dst []byte, s string) []byte"}, + {"AppendUint", Func, 0, "func(dst []byte, i uint64, base int) []byte"}, + {"Atoi", Func, 0, "func(s string) 
(int, error)"}, + {"CanBackquote", Func, 0, "func(s string) bool"}, + {"ErrRange", Var, 0, ""}, + {"ErrSyntax", Var, 0, ""}, + {"FormatBool", Func, 0, "func(b bool) string"}, + {"FormatComplex", Func, 15, "func(c complex128, fmt byte, prec int, bitSize int) string"}, + {"FormatFloat", Func, 0, "func(f float64, fmt byte, prec int, bitSize int) string"}, + {"FormatInt", Func, 0, "func(i int64, base int) string"}, + {"FormatUint", Func, 0, "func(i uint64, base int) string"}, + {"IntSize", Const, 0, ""}, + {"IsGraphic", Func, 6, "func(r rune) bool"}, + {"IsPrint", Func, 0, "func(r rune) bool"}, + {"Itoa", Func, 0, "func(i int) string"}, + {"NumError", Type, 0, ""}, + {"NumError.Err", Field, 0, ""}, + {"NumError.Func", Field, 0, ""}, + {"NumError.Num", Field, 0, ""}, + {"ParseBool", Func, 0, "func(str string) (bool, error)"}, + {"ParseComplex", Func, 15, "func(s string, bitSize int) (complex128, error)"}, + {"ParseFloat", Func, 0, "func(s string, bitSize int) (float64, error)"}, + {"ParseInt", Func, 0, "func(s string, base int, bitSize int) (i int64, err error)"}, + {"ParseUint", Func, 0, "func(s string, base int, bitSize int) (uint64, error)"}, + {"Quote", Func, 0, "func(s string) string"}, + {"QuoteRune", Func, 0, "func(r rune) string"}, + {"QuoteRuneToASCII", Func, 0, "func(r rune) string"}, + {"QuoteRuneToGraphic", Func, 6, "func(r rune) string"}, + {"QuoteToASCII", Func, 0, "func(s string) string"}, + {"QuoteToGraphic", Func, 6, "func(s string) string"}, + {"QuotedPrefix", Func, 17, "func(s string) (string, error)"}, + {"Unquote", Func, 0, "func(s string) (string, error)"}, + {"UnquoteChar", Func, 0, "func(s string, quote byte) (value rune, multibyte bool, tail string, err error)"}, + }, + "strings": { + {"(*Builder).Cap", Method, 12, ""}, + {"(*Builder).Grow", Method, 10, ""}, + {"(*Builder).Len", Method, 10, ""}, + {"(*Builder).Reset", Method, 10, ""}, + {"(*Builder).String", Method, 10, ""}, + {"(*Builder).Write", Method, 10, ""}, + {"(*Builder).WriteByte", Method, 10, ""}, + {"(*Builder).WriteRune", Method, 10, ""}, + {"(*Builder).WriteString", Method, 10, ""}, + {"(*Reader).Len", Method, 0, ""}, + {"(*Reader).Read", Method, 0, ""}, + {"(*Reader).ReadAt", Method, 0, ""}, + {"(*Reader).ReadByte", Method, 0, ""}, + {"(*Reader).ReadRune", Method, 0, ""}, + {"(*Reader).Reset", Method, 7, ""}, + {"(*Reader).Seek", Method, 0, ""}, + {"(*Reader).Size", Method, 5, ""}, + {"(*Reader).UnreadByte", Method, 0, ""}, + {"(*Reader).UnreadRune", Method, 0, ""}, + {"(*Reader).WriteTo", Method, 1, ""}, + {"(*Replacer).Replace", Method, 0, ""}, + {"(*Replacer).WriteString", Method, 0, ""}, + {"Builder", Type, 10, ""}, + {"Clone", Func, 18, "func(s string) string"}, + {"Compare", Func, 5, "func(a string, b string) int"}, + {"Contains", Func, 0, "func(s string, substr string) bool"}, + {"ContainsAny", Func, 0, "func(s string, chars string) bool"}, + {"ContainsFunc", Func, 21, "func(s string, f func(rune) bool) bool"}, + {"ContainsRune", Func, 0, "func(s string, r rune) bool"}, + {"Count", Func, 0, "func(s string, substr string) int"}, + {"Cut", Func, 18, "func(s string, sep string) (before string, after string, found bool)"}, + {"CutPrefix", Func, 20, "func(s string, prefix string) (after string, found bool)"}, + {"CutSuffix", Func, 20, "func(s string, suffix string) (before string, found bool)"}, + {"EqualFold", Func, 0, "func(s string, t string) bool"}, + {"Fields", Func, 0, "func(s string) []string"}, + {"FieldsFunc", Func, 0, "func(s string, f func(rune) bool) []string"}, + {"FieldsFuncSeq", Func, 24, 
"func(s string, f func(rune) bool) iter.Seq[string]"}, + {"FieldsSeq", Func, 24, "func(s string) iter.Seq[string]"}, + {"HasPrefix", Func, 0, "func(s string, prefix string) bool"}, + {"HasSuffix", Func, 0, "func(s string, suffix string) bool"}, + {"Index", Func, 0, "func(s string, substr string) int"}, + {"IndexAny", Func, 0, "func(s string, chars string) int"}, + {"IndexByte", Func, 2, "func(s string, c byte) int"}, + {"IndexFunc", Func, 0, "func(s string, f func(rune) bool) int"}, + {"IndexRune", Func, 0, "func(s string, r rune) int"}, + {"Join", Func, 0, "func(elems []string, sep string) string"}, + {"LastIndex", Func, 0, "func(s string, substr string) int"}, + {"LastIndexAny", Func, 0, "func(s string, chars string) int"}, + {"LastIndexByte", Func, 5, "func(s string, c byte) int"}, + {"LastIndexFunc", Func, 0, "func(s string, f func(rune) bool) int"}, + {"Lines", Func, 24, "func(s string) iter.Seq[string]"}, + {"Map", Func, 0, "func(mapping func(rune) rune, s string) string"}, + {"NewReader", Func, 0, "func(s string) *Reader"}, + {"NewReplacer", Func, 0, "func(oldnew ...string) *Replacer"}, + {"Reader", Type, 0, ""}, + {"Repeat", Func, 0, "func(s string, count int) string"}, + {"Replace", Func, 0, "func(s string, old string, new string, n int) string"}, + {"ReplaceAll", Func, 12, "func(s string, old string, new string) string"}, + {"Replacer", Type, 0, ""}, + {"Split", Func, 0, "func(s string, sep string) []string"}, + {"SplitAfter", Func, 0, "func(s string, sep string) []string"}, + {"SplitAfterN", Func, 0, "func(s string, sep string, n int) []string"}, + {"SplitAfterSeq", Func, 24, "func(s string, sep string) iter.Seq[string]"}, + {"SplitN", Func, 0, "func(s string, sep string, n int) []string"}, + {"SplitSeq", Func, 24, "func(s string, sep string) iter.Seq[string]"}, + {"Title", Func, 0, "func(s string) string"}, + {"ToLower", Func, 0, "func(s string) string"}, + {"ToLowerSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"}, + {"ToTitle", Func, 0, "func(s string) string"}, + {"ToTitleSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"}, + {"ToUpper", Func, 0, "func(s string) string"}, + {"ToUpperSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"}, + {"ToValidUTF8", Func, 13, "func(s string, replacement string) string"}, + {"Trim", Func, 0, "func(s string, cutset string) string"}, + {"TrimFunc", Func, 0, "func(s string, f func(rune) bool) string"}, + {"TrimLeft", Func, 0, "func(s string, cutset string) string"}, + {"TrimLeftFunc", Func, 0, "func(s string, f func(rune) bool) string"}, + {"TrimPrefix", Func, 1, "func(s string, prefix string) string"}, + {"TrimRight", Func, 0, "func(s string, cutset string) string"}, + {"TrimRightFunc", Func, 0, "func(s string, f func(rune) bool) string"}, + {"TrimSpace", Func, 0, "func(s string) string"}, + {"TrimSuffix", Func, 1, "func(s string, suffix string) string"}, + }, + "structs": { + {"HostLayout", Type, 23, ""}, + }, + "sync": { + {"(*Cond).Broadcast", Method, 0, ""}, + {"(*Cond).Signal", Method, 0, ""}, + {"(*Cond).Wait", Method, 0, ""}, + {"(*Map).Clear", Method, 23, ""}, + {"(*Map).CompareAndDelete", Method, 20, ""}, + {"(*Map).CompareAndSwap", Method, 20, ""}, + {"(*Map).Delete", Method, 9, ""}, + {"(*Map).Load", Method, 9, ""}, + {"(*Map).LoadAndDelete", Method, 15, ""}, + {"(*Map).LoadOrStore", Method, 9, ""}, + {"(*Map).Range", Method, 9, ""}, + {"(*Map).Store", Method, 9, ""}, + {"(*Map).Swap", Method, 20, ""}, + {"(*Mutex).Lock", Method, 0, ""}, + {"(*Mutex).TryLock", Method, 18, ""}, + 
{"(*Mutex).Unlock", Method, 0, ""}, + {"(*Once).Do", Method, 0, ""}, + {"(*Pool).Get", Method, 3, ""}, + {"(*Pool).Put", Method, 3, ""}, + {"(*RWMutex).Lock", Method, 0, ""}, + {"(*RWMutex).RLock", Method, 0, ""}, + {"(*RWMutex).RLocker", Method, 0, ""}, + {"(*RWMutex).RUnlock", Method, 0, ""}, + {"(*RWMutex).TryLock", Method, 18, ""}, + {"(*RWMutex).TryRLock", Method, 18, ""}, + {"(*RWMutex).Unlock", Method, 0, ""}, + {"(*WaitGroup).Add", Method, 0, ""}, + {"(*WaitGroup).Done", Method, 0, ""}, + {"(*WaitGroup).Go", Method, 25, ""}, + {"(*WaitGroup).Wait", Method, 0, ""}, + {"Cond", Type, 0, ""}, + {"Cond.L", Field, 0, ""}, + {"Locker", Type, 0, ""}, + {"Map", Type, 9, ""}, + {"Mutex", Type, 0, ""}, + {"NewCond", Func, 0, "func(l Locker) *Cond"}, + {"Once", Type, 0, ""}, + {"OnceFunc", Func, 21, "func(f func()) func()"}, + {"OnceValue", Func, 21, "func[T any](f func() T) func() T"}, + {"OnceValues", Func, 21, "func[T1, T2 any](f func() (T1, T2)) func() (T1, T2)"}, + {"Pool", Type, 3, ""}, + {"Pool.New", Field, 3, ""}, + {"RWMutex", Type, 0, ""}, + {"WaitGroup", Type, 0, ""}, + }, + "sync/atomic": { + {"(*Bool).CompareAndSwap", Method, 19, ""}, + {"(*Bool).Load", Method, 19, ""}, + {"(*Bool).Store", Method, 19, ""}, + {"(*Bool).Swap", Method, 19, ""}, + {"(*Int32).Add", Method, 19, ""}, + {"(*Int32).And", Method, 23, ""}, + {"(*Int32).CompareAndSwap", Method, 19, ""}, + {"(*Int32).Load", Method, 19, ""}, + {"(*Int32).Or", Method, 23, ""}, + {"(*Int32).Store", Method, 19, ""}, + {"(*Int32).Swap", Method, 19, ""}, + {"(*Int64).Add", Method, 19, ""}, + {"(*Int64).And", Method, 23, ""}, + {"(*Int64).CompareAndSwap", Method, 19, ""}, + {"(*Int64).Load", Method, 19, ""}, + {"(*Int64).Or", Method, 23, ""}, + {"(*Int64).Store", Method, 19, ""}, + {"(*Int64).Swap", Method, 19, ""}, + {"(*Pointer).CompareAndSwap", Method, 19, ""}, + {"(*Pointer).Load", Method, 19, ""}, + {"(*Pointer).Store", Method, 19, ""}, + {"(*Pointer).Swap", Method, 19, ""}, + {"(*Uint32).Add", Method, 19, ""}, + {"(*Uint32).And", Method, 23, ""}, + {"(*Uint32).CompareAndSwap", Method, 19, ""}, + {"(*Uint32).Load", Method, 19, ""}, + {"(*Uint32).Or", Method, 23, ""}, + {"(*Uint32).Store", Method, 19, ""}, + {"(*Uint32).Swap", Method, 19, ""}, + {"(*Uint64).Add", Method, 19, ""}, + {"(*Uint64).And", Method, 23, ""}, + {"(*Uint64).CompareAndSwap", Method, 19, ""}, + {"(*Uint64).Load", Method, 19, ""}, + {"(*Uint64).Or", Method, 23, ""}, + {"(*Uint64).Store", Method, 19, ""}, + {"(*Uint64).Swap", Method, 19, ""}, + {"(*Uintptr).Add", Method, 19, ""}, + {"(*Uintptr).And", Method, 23, ""}, + {"(*Uintptr).CompareAndSwap", Method, 19, ""}, + {"(*Uintptr).Load", Method, 19, ""}, + {"(*Uintptr).Or", Method, 23, ""}, + {"(*Uintptr).Store", Method, 19, ""}, + {"(*Uintptr).Swap", Method, 19, ""}, + {"(*Value).CompareAndSwap", Method, 17, ""}, + {"(*Value).Load", Method, 4, ""}, + {"(*Value).Store", Method, 4, ""}, + {"(*Value).Swap", Method, 17, ""}, + {"AddInt32", Func, 0, "func(addr *int32, delta int32) (new int32)"}, + {"AddInt64", Func, 0, "func(addr *int64, delta int64) (new int64)"}, + {"AddUint32", Func, 0, "func(addr *uint32, delta uint32) (new uint32)"}, + {"AddUint64", Func, 0, "func(addr *uint64, delta uint64) (new uint64)"}, + {"AddUintptr", Func, 0, "func(addr *uintptr, delta uintptr) (new uintptr)"}, + {"AndInt32", Func, 23, "func(addr *int32, mask int32) (old int32)"}, + {"AndInt64", Func, 23, "func(addr *int64, mask int64) (old int64)"}, + {"AndUint32", Func, 23, "func(addr *uint32, mask uint32) (old uint32)"}, + 
{"AndUint64", Func, 23, "func(addr *uint64, mask uint64) (old uint64)"}, + {"AndUintptr", Func, 23, "func(addr *uintptr, mask uintptr) (old uintptr)"}, + {"Bool", Type, 19, ""}, + {"CompareAndSwapInt32", Func, 0, "func(addr *int32, old int32, new int32) (swapped bool)"}, + {"CompareAndSwapInt64", Func, 0, "func(addr *int64, old int64, new int64) (swapped bool)"}, + {"CompareAndSwapPointer", Func, 0, "func(addr *unsafe.Pointer, old unsafe.Pointer, new unsafe.Pointer) (swapped bool)"}, + {"CompareAndSwapUint32", Func, 0, "func(addr *uint32, old uint32, new uint32) (swapped bool)"}, + {"CompareAndSwapUint64", Func, 0, "func(addr *uint64, old uint64, new uint64) (swapped bool)"}, + {"CompareAndSwapUintptr", Func, 0, "func(addr *uintptr, old uintptr, new uintptr) (swapped bool)"}, + {"Int32", Type, 19, ""}, + {"Int64", Type, 19, ""}, + {"LoadInt32", Func, 0, "func(addr *int32) (val int32)"}, + {"LoadInt64", Func, 0, "func(addr *int64) (val int64)"}, + {"LoadPointer", Func, 0, "func(addr *unsafe.Pointer) (val unsafe.Pointer)"}, + {"LoadUint32", Func, 0, "func(addr *uint32) (val uint32)"}, + {"LoadUint64", Func, 0, "func(addr *uint64) (val uint64)"}, + {"LoadUintptr", Func, 0, "func(addr *uintptr) (val uintptr)"}, + {"OrInt32", Func, 23, "func(addr *int32, mask int32) (old int32)"}, + {"OrInt64", Func, 23, "func(addr *int64, mask int64) (old int64)"}, + {"OrUint32", Func, 23, "func(addr *uint32, mask uint32) (old uint32)"}, + {"OrUint64", Func, 23, "func(addr *uint64, mask uint64) (old uint64)"}, + {"OrUintptr", Func, 23, "func(addr *uintptr, mask uintptr) (old uintptr)"}, + {"Pointer", Type, 19, ""}, + {"StoreInt32", Func, 0, "func(addr *int32, val int32)"}, + {"StoreInt64", Func, 0, "func(addr *int64, val int64)"}, + {"StorePointer", Func, 0, "func(addr *unsafe.Pointer, val unsafe.Pointer)"}, + {"StoreUint32", Func, 0, "func(addr *uint32, val uint32)"}, + {"StoreUint64", Func, 0, "func(addr *uint64, val uint64)"}, + {"StoreUintptr", Func, 0, "func(addr *uintptr, val uintptr)"}, + {"SwapInt32", Func, 2, "func(addr *int32, new int32) (old int32)"}, + {"SwapInt64", Func, 2, "func(addr *int64, new int64) (old int64)"}, + {"SwapPointer", Func, 2, "func(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)"}, + {"SwapUint32", Func, 2, "func(addr *uint32, new uint32) (old uint32)"}, + {"SwapUint64", Func, 2, "func(addr *uint64, new uint64) (old uint64)"}, + {"SwapUintptr", Func, 2, "func(addr *uintptr, new uintptr) (old uintptr)"}, + {"Uint32", Type, 19, ""}, + {"Uint64", Type, 19, ""}, + {"Uintptr", Type, 19, ""}, + {"Value", Type, 4, ""}, + }, + "syscall": { + {"(*Cmsghdr).SetLen", Method, 0, ""}, + {"(*DLL).FindProc", Method, 0, ""}, + {"(*DLL).MustFindProc", Method, 0, ""}, + {"(*DLL).Release", Method, 0, ""}, + {"(*DLLError).Error", Method, 0, ""}, + {"(*DLLError).Unwrap", Method, 16, ""}, + {"(*Filetime).Nanoseconds", Method, 0, ""}, + {"(*Iovec).SetLen", Method, 0, ""}, + {"(*LazyDLL).Handle", Method, 0, ""}, + {"(*LazyDLL).Load", Method, 0, ""}, + {"(*LazyDLL).NewProc", Method, 0, ""}, + {"(*LazyProc).Addr", Method, 0, ""}, + {"(*LazyProc).Call", Method, 0, ""}, + {"(*LazyProc).Find", Method, 0, ""}, + {"(*Msghdr).SetControllen", Method, 0, ""}, + {"(*Proc).Addr", Method, 0, ""}, + {"(*Proc).Call", Method, 0, ""}, + {"(*PtraceRegs).PC", Method, 0, ""}, + {"(*PtraceRegs).SetPC", Method, 0, ""}, + {"(*RawSockaddrAny).Sockaddr", Method, 0, ""}, + {"(*SID).Copy", Method, 0, ""}, + {"(*SID).Len", Method, 0, ""}, + {"(*SID).LookupAccount", Method, 0, ""}, + {"(*SID).String", Method, 0, ""}, 
+ {"(*Timespec).Nano", Method, 0, ""}, + {"(*Timespec).Unix", Method, 0, ""}, + {"(*Timeval).Nano", Method, 0, ""}, + {"(*Timeval).Nanoseconds", Method, 0, ""}, + {"(*Timeval).Unix", Method, 0, ""}, + {"(Errno).Error", Method, 0, ""}, + {"(Errno).Is", Method, 13, ""}, + {"(Errno).Temporary", Method, 0, ""}, + {"(Errno).Timeout", Method, 0, ""}, + {"(Signal).Signal", Method, 0, ""}, + {"(Signal).String", Method, 0, ""}, + {"(Token).Close", Method, 0, ""}, + {"(Token).GetTokenPrimaryGroup", Method, 0, ""}, + {"(Token).GetTokenUser", Method, 0, ""}, + {"(Token).GetUserProfileDirectory", Method, 0, ""}, + {"(WaitStatus).Continued", Method, 0, ""}, + {"(WaitStatus).CoreDump", Method, 0, ""}, + {"(WaitStatus).ExitStatus", Method, 0, ""}, + {"(WaitStatus).Exited", Method, 0, ""}, + {"(WaitStatus).Signal", Method, 0, ""}, + {"(WaitStatus).Signaled", Method, 0, ""}, + {"(WaitStatus).StopSignal", Method, 0, ""}, + {"(WaitStatus).Stopped", Method, 0, ""}, + {"(WaitStatus).TrapCause", Method, 0, ""}, + {"AF_ALG", Const, 0, ""}, + {"AF_APPLETALK", Const, 0, ""}, + {"AF_ARP", Const, 0, ""}, + {"AF_ASH", Const, 0, ""}, + {"AF_ATM", Const, 0, ""}, + {"AF_ATMPVC", Const, 0, ""}, + {"AF_ATMSVC", Const, 0, ""}, + {"AF_AX25", Const, 0, ""}, + {"AF_BLUETOOTH", Const, 0, ""}, + {"AF_BRIDGE", Const, 0, ""}, + {"AF_CAIF", Const, 0, ""}, + {"AF_CAN", Const, 0, ""}, + {"AF_CCITT", Const, 0, ""}, + {"AF_CHAOS", Const, 0, ""}, + {"AF_CNT", Const, 0, ""}, + {"AF_COIP", Const, 0, ""}, + {"AF_DATAKIT", Const, 0, ""}, + {"AF_DECnet", Const, 0, ""}, + {"AF_DLI", Const, 0, ""}, + {"AF_E164", Const, 0, ""}, + {"AF_ECMA", Const, 0, ""}, + {"AF_ECONET", Const, 0, ""}, + {"AF_ENCAP", Const, 1, ""}, + {"AF_FILE", Const, 0, ""}, + {"AF_HYLINK", Const, 0, ""}, + {"AF_IEEE80211", Const, 0, ""}, + {"AF_IEEE802154", Const, 0, ""}, + {"AF_IMPLINK", Const, 0, ""}, + {"AF_INET", Const, 0, ""}, + {"AF_INET6", Const, 0, ""}, + {"AF_INET6_SDP", Const, 3, ""}, + {"AF_INET_SDP", Const, 3, ""}, + {"AF_IPX", Const, 0, ""}, + {"AF_IRDA", Const, 0, ""}, + {"AF_ISDN", Const, 0, ""}, + {"AF_ISO", Const, 0, ""}, + {"AF_IUCV", Const, 0, ""}, + {"AF_KEY", Const, 0, ""}, + {"AF_LAT", Const, 0, ""}, + {"AF_LINK", Const, 0, ""}, + {"AF_LLC", Const, 0, ""}, + {"AF_LOCAL", Const, 0, ""}, + {"AF_MAX", Const, 0, ""}, + {"AF_MPLS", Const, 1, ""}, + {"AF_NATM", Const, 0, ""}, + {"AF_NDRV", Const, 0, ""}, + {"AF_NETBEUI", Const, 0, ""}, + {"AF_NETBIOS", Const, 0, ""}, + {"AF_NETGRAPH", Const, 0, ""}, + {"AF_NETLINK", Const, 0, ""}, + {"AF_NETROM", Const, 0, ""}, + {"AF_NS", Const, 0, ""}, + {"AF_OROUTE", Const, 1, ""}, + {"AF_OSI", Const, 0, ""}, + {"AF_PACKET", Const, 0, ""}, + {"AF_PHONET", Const, 0, ""}, + {"AF_PPP", Const, 0, ""}, + {"AF_PPPOX", Const, 0, ""}, + {"AF_PUP", Const, 0, ""}, + {"AF_RDS", Const, 0, ""}, + {"AF_RESERVED_36", Const, 0, ""}, + {"AF_ROSE", Const, 0, ""}, + {"AF_ROUTE", Const, 0, ""}, + {"AF_RXRPC", Const, 0, ""}, + {"AF_SCLUSTER", Const, 0, ""}, + {"AF_SECURITY", Const, 0, ""}, + {"AF_SIP", Const, 0, ""}, + {"AF_SLOW", Const, 0, ""}, + {"AF_SNA", Const, 0, ""}, + {"AF_SYSTEM", Const, 0, ""}, + {"AF_TIPC", Const, 0, ""}, + {"AF_UNIX", Const, 0, ""}, + {"AF_UNSPEC", Const, 0, ""}, + {"AF_UTUN", Const, 16, ""}, + {"AF_VENDOR00", Const, 0, ""}, + {"AF_VENDOR01", Const, 0, ""}, + {"AF_VENDOR02", Const, 0, ""}, + {"AF_VENDOR03", Const, 0, ""}, + {"AF_VENDOR04", Const, 0, ""}, + {"AF_VENDOR05", Const, 0, ""}, + {"AF_VENDOR06", Const, 0, ""}, + {"AF_VENDOR07", Const, 0, ""}, + {"AF_VENDOR08", Const, 0, ""}, + {"AF_VENDOR09", Const, 0, 
""}, + {"AF_VENDOR10", Const, 0, ""}, + {"AF_VENDOR11", Const, 0, ""}, + {"AF_VENDOR12", Const, 0, ""}, + {"AF_VENDOR13", Const, 0, ""}, + {"AF_VENDOR14", Const, 0, ""}, + {"AF_VENDOR15", Const, 0, ""}, + {"AF_VENDOR16", Const, 0, ""}, + {"AF_VENDOR17", Const, 0, ""}, + {"AF_VENDOR18", Const, 0, ""}, + {"AF_VENDOR19", Const, 0, ""}, + {"AF_VENDOR20", Const, 0, ""}, + {"AF_VENDOR21", Const, 0, ""}, + {"AF_VENDOR22", Const, 0, ""}, + {"AF_VENDOR23", Const, 0, ""}, + {"AF_VENDOR24", Const, 0, ""}, + {"AF_VENDOR25", Const, 0, ""}, + {"AF_VENDOR26", Const, 0, ""}, + {"AF_VENDOR27", Const, 0, ""}, + {"AF_VENDOR28", Const, 0, ""}, + {"AF_VENDOR29", Const, 0, ""}, + {"AF_VENDOR30", Const, 0, ""}, + {"AF_VENDOR31", Const, 0, ""}, + {"AF_VENDOR32", Const, 0, ""}, + {"AF_VENDOR33", Const, 0, ""}, + {"AF_VENDOR34", Const, 0, ""}, + {"AF_VENDOR35", Const, 0, ""}, + {"AF_VENDOR36", Const, 0, ""}, + {"AF_VENDOR37", Const, 0, ""}, + {"AF_VENDOR38", Const, 0, ""}, + {"AF_VENDOR39", Const, 0, ""}, + {"AF_VENDOR40", Const, 0, ""}, + {"AF_VENDOR41", Const, 0, ""}, + {"AF_VENDOR42", Const, 0, ""}, + {"AF_VENDOR43", Const, 0, ""}, + {"AF_VENDOR44", Const, 0, ""}, + {"AF_VENDOR45", Const, 0, ""}, + {"AF_VENDOR46", Const, 0, ""}, + {"AF_VENDOR47", Const, 0, ""}, + {"AF_WANPIPE", Const, 0, ""}, + {"AF_X25", Const, 0, ""}, + {"AI_CANONNAME", Const, 1, ""}, + {"AI_NUMERICHOST", Const, 1, ""}, + {"AI_PASSIVE", Const, 1, ""}, + {"APPLICATION_ERROR", Const, 0, ""}, + {"ARPHRD_ADAPT", Const, 0, ""}, + {"ARPHRD_APPLETLK", Const, 0, ""}, + {"ARPHRD_ARCNET", Const, 0, ""}, + {"ARPHRD_ASH", Const, 0, ""}, + {"ARPHRD_ATM", Const, 0, ""}, + {"ARPHRD_AX25", Const, 0, ""}, + {"ARPHRD_BIF", Const, 0, ""}, + {"ARPHRD_CHAOS", Const, 0, ""}, + {"ARPHRD_CISCO", Const, 0, ""}, + {"ARPHRD_CSLIP", Const, 0, ""}, + {"ARPHRD_CSLIP6", Const, 0, ""}, + {"ARPHRD_DDCMP", Const, 0, ""}, + {"ARPHRD_DLCI", Const, 0, ""}, + {"ARPHRD_ECONET", Const, 0, ""}, + {"ARPHRD_EETHER", Const, 0, ""}, + {"ARPHRD_ETHER", Const, 0, ""}, + {"ARPHRD_EUI64", Const, 0, ""}, + {"ARPHRD_FCAL", Const, 0, ""}, + {"ARPHRD_FCFABRIC", Const, 0, ""}, + {"ARPHRD_FCPL", Const, 0, ""}, + {"ARPHRD_FCPP", Const, 0, ""}, + {"ARPHRD_FDDI", Const, 0, ""}, + {"ARPHRD_FRAD", Const, 0, ""}, + {"ARPHRD_FRELAY", Const, 1, ""}, + {"ARPHRD_HDLC", Const, 0, ""}, + {"ARPHRD_HIPPI", Const, 0, ""}, + {"ARPHRD_HWX25", Const, 0, ""}, + {"ARPHRD_IEEE1394", Const, 0, ""}, + {"ARPHRD_IEEE802", Const, 0, ""}, + {"ARPHRD_IEEE80211", Const, 0, ""}, + {"ARPHRD_IEEE80211_PRISM", Const, 0, ""}, + {"ARPHRD_IEEE80211_RADIOTAP", Const, 0, ""}, + {"ARPHRD_IEEE802154", Const, 0, ""}, + {"ARPHRD_IEEE802154_PHY", Const, 0, ""}, + {"ARPHRD_IEEE802_TR", Const, 0, ""}, + {"ARPHRD_INFINIBAND", Const, 0, ""}, + {"ARPHRD_IPDDP", Const, 0, ""}, + {"ARPHRD_IPGRE", Const, 0, ""}, + {"ARPHRD_IRDA", Const, 0, ""}, + {"ARPHRD_LAPB", Const, 0, ""}, + {"ARPHRD_LOCALTLK", Const, 0, ""}, + {"ARPHRD_LOOPBACK", Const, 0, ""}, + {"ARPHRD_METRICOM", Const, 0, ""}, + {"ARPHRD_NETROM", Const, 0, ""}, + {"ARPHRD_NONE", Const, 0, ""}, + {"ARPHRD_PIMREG", Const, 0, ""}, + {"ARPHRD_PPP", Const, 0, ""}, + {"ARPHRD_PRONET", Const, 0, ""}, + {"ARPHRD_RAWHDLC", Const, 0, ""}, + {"ARPHRD_ROSE", Const, 0, ""}, + {"ARPHRD_RSRVD", Const, 0, ""}, + {"ARPHRD_SIT", Const, 0, ""}, + {"ARPHRD_SKIP", Const, 0, ""}, + {"ARPHRD_SLIP", Const, 0, ""}, + {"ARPHRD_SLIP6", Const, 0, ""}, + {"ARPHRD_STRIP", Const, 1, ""}, + {"ARPHRD_TUNNEL", Const, 0, ""}, + {"ARPHRD_TUNNEL6", Const, 0, ""}, + {"ARPHRD_VOID", Const, 0, ""}, + {"ARPHRD_X25", Const, 0, 
""}, + {"AUTHTYPE_CLIENT", Const, 0, ""}, + {"AUTHTYPE_SERVER", Const, 0, ""}, + {"Accept", Func, 0, "func(fd int) (nfd int, sa Sockaddr, err error)"}, + {"Accept4", Func, 1, "func(fd int, flags int) (nfd int, sa Sockaddr, err error)"}, + {"AcceptEx", Func, 0, ""}, + {"Access", Func, 0, "func(path string, mode uint32) (err error)"}, + {"Acct", Func, 0, "func(path string) (err error)"}, + {"AddrinfoW", Type, 1, ""}, + {"AddrinfoW.Addr", Field, 1, ""}, + {"AddrinfoW.Addrlen", Field, 1, ""}, + {"AddrinfoW.Canonname", Field, 1, ""}, + {"AddrinfoW.Family", Field, 1, ""}, + {"AddrinfoW.Flags", Field, 1, ""}, + {"AddrinfoW.Next", Field, 1, ""}, + {"AddrinfoW.Protocol", Field, 1, ""}, + {"AddrinfoW.Socktype", Field, 1, ""}, + {"Adjtime", Func, 0, ""}, + {"Adjtimex", Func, 0, "func(buf *Timex) (state int, err error)"}, + {"AllThreadsSyscall", Func, 16, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"}, + {"AllThreadsSyscall6", Func, 16, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"}, + {"AttachLsf", Func, 0, "func(fd int, i []SockFilter) error"}, + {"B0", Const, 0, ""}, + {"B1000000", Const, 0, ""}, + {"B110", Const, 0, ""}, + {"B115200", Const, 0, ""}, + {"B1152000", Const, 0, ""}, + {"B1200", Const, 0, ""}, + {"B134", Const, 0, ""}, + {"B14400", Const, 1, ""}, + {"B150", Const, 0, ""}, + {"B1500000", Const, 0, ""}, + {"B1800", Const, 0, ""}, + {"B19200", Const, 0, ""}, + {"B200", Const, 0, ""}, + {"B2000000", Const, 0, ""}, + {"B230400", Const, 0, ""}, + {"B2400", Const, 0, ""}, + {"B2500000", Const, 0, ""}, + {"B28800", Const, 1, ""}, + {"B300", Const, 0, ""}, + {"B3000000", Const, 0, ""}, + {"B3500000", Const, 0, ""}, + {"B38400", Const, 0, ""}, + {"B4000000", Const, 0, ""}, + {"B460800", Const, 0, ""}, + {"B4800", Const, 0, ""}, + {"B50", Const, 0, ""}, + {"B500000", Const, 0, ""}, + {"B57600", Const, 0, ""}, + {"B576000", Const, 0, ""}, + {"B600", Const, 0, ""}, + {"B7200", Const, 1, ""}, + {"B75", Const, 0, ""}, + {"B76800", Const, 1, ""}, + {"B921600", Const, 0, ""}, + {"B9600", Const, 0, ""}, + {"BASE_PROTOCOL", Const, 2, ""}, + {"BIOCFEEDBACK", Const, 0, ""}, + {"BIOCFLUSH", Const, 0, ""}, + {"BIOCGBLEN", Const, 0, ""}, + {"BIOCGDIRECTION", Const, 0, ""}, + {"BIOCGDIRFILT", Const, 1, ""}, + {"BIOCGDLT", Const, 0, ""}, + {"BIOCGDLTLIST", Const, 0, ""}, + {"BIOCGETBUFMODE", Const, 0, ""}, + {"BIOCGETIF", Const, 0, ""}, + {"BIOCGETZMAX", Const, 0, ""}, + {"BIOCGFEEDBACK", Const, 1, ""}, + {"BIOCGFILDROP", Const, 1, ""}, + {"BIOCGHDRCMPLT", Const, 0, ""}, + {"BIOCGRSIG", Const, 0, ""}, + {"BIOCGRTIMEOUT", Const, 0, ""}, + {"BIOCGSEESENT", Const, 0, ""}, + {"BIOCGSTATS", Const, 0, ""}, + {"BIOCGSTATSOLD", Const, 1, ""}, + {"BIOCGTSTAMP", Const, 1, ""}, + {"BIOCIMMEDIATE", Const, 0, ""}, + {"BIOCLOCK", Const, 0, ""}, + {"BIOCPROMISC", Const, 0, ""}, + {"BIOCROTZBUF", Const, 0, ""}, + {"BIOCSBLEN", Const, 0, ""}, + {"BIOCSDIRECTION", Const, 0, ""}, + {"BIOCSDIRFILT", Const, 1, ""}, + {"BIOCSDLT", Const, 0, ""}, + {"BIOCSETBUFMODE", Const, 0, ""}, + {"BIOCSETF", Const, 0, ""}, + {"BIOCSETFNR", Const, 0, ""}, + {"BIOCSETIF", Const, 0, ""}, + {"BIOCSETWF", Const, 0, ""}, + {"BIOCSETZBUF", Const, 0, ""}, + {"BIOCSFEEDBACK", Const, 1, ""}, + {"BIOCSFILDROP", Const, 1, ""}, + {"BIOCSHDRCMPLT", Const, 0, ""}, + {"BIOCSRSIG", Const, 0, ""}, + {"BIOCSRTIMEOUT", Const, 0, ""}, + {"BIOCSSEESENT", Const, 0, ""}, + {"BIOCSTCPF", Const, 1, ""}, + {"BIOCSTSTAMP", Const, 1, ""}, + 
{"BIOCSUDPF", Const, 1, ""}, + {"BIOCVERSION", Const, 0, ""}, + {"BPF_A", Const, 0, ""}, + {"BPF_ABS", Const, 0, ""}, + {"BPF_ADD", Const, 0, ""}, + {"BPF_ALIGNMENT", Const, 0, ""}, + {"BPF_ALIGNMENT32", Const, 1, ""}, + {"BPF_ALU", Const, 0, ""}, + {"BPF_AND", Const, 0, ""}, + {"BPF_B", Const, 0, ""}, + {"BPF_BUFMODE_BUFFER", Const, 0, ""}, + {"BPF_BUFMODE_ZBUF", Const, 0, ""}, + {"BPF_DFLTBUFSIZE", Const, 1, ""}, + {"BPF_DIRECTION_IN", Const, 1, ""}, + {"BPF_DIRECTION_OUT", Const, 1, ""}, + {"BPF_DIV", Const, 0, ""}, + {"BPF_H", Const, 0, ""}, + {"BPF_IMM", Const, 0, ""}, + {"BPF_IND", Const, 0, ""}, + {"BPF_JA", Const, 0, ""}, + {"BPF_JEQ", Const, 0, ""}, + {"BPF_JGE", Const, 0, ""}, + {"BPF_JGT", Const, 0, ""}, + {"BPF_JMP", Const, 0, ""}, + {"BPF_JSET", Const, 0, ""}, + {"BPF_K", Const, 0, ""}, + {"BPF_LD", Const, 0, ""}, + {"BPF_LDX", Const, 0, ""}, + {"BPF_LEN", Const, 0, ""}, + {"BPF_LSH", Const, 0, ""}, + {"BPF_MAJOR_VERSION", Const, 0, ""}, + {"BPF_MAXBUFSIZE", Const, 0, ""}, + {"BPF_MAXINSNS", Const, 0, ""}, + {"BPF_MEM", Const, 0, ""}, + {"BPF_MEMWORDS", Const, 0, ""}, + {"BPF_MINBUFSIZE", Const, 0, ""}, + {"BPF_MINOR_VERSION", Const, 0, ""}, + {"BPF_MISC", Const, 0, ""}, + {"BPF_MSH", Const, 0, ""}, + {"BPF_MUL", Const, 0, ""}, + {"BPF_NEG", Const, 0, ""}, + {"BPF_OR", Const, 0, ""}, + {"BPF_RELEASE", Const, 0, ""}, + {"BPF_RET", Const, 0, ""}, + {"BPF_RSH", Const, 0, ""}, + {"BPF_ST", Const, 0, ""}, + {"BPF_STX", Const, 0, ""}, + {"BPF_SUB", Const, 0, ""}, + {"BPF_TAX", Const, 0, ""}, + {"BPF_TXA", Const, 0, ""}, + {"BPF_T_BINTIME", Const, 1, ""}, + {"BPF_T_BINTIME_FAST", Const, 1, ""}, + {"BPF_T_BINTIME_MONOTONIC", Const, 1, ""}, + {"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1, ""}, + {"BPF_T_FAST", Const, 1, ""}, + {"BPF_T_FLAG_MASK", Const, 1, ""}, + {"BPF_T_FORMAT_MASK", Const, 1, ""}, + {"BPF_T_MICROTIME", Const, 1, ""}, + {"BPF_T_MICROTIME_FAST", Const, 1, ""}, + {"BPF_T_MICROTIME_MONOTONIC", Const, 1, ""}, + {"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1, ""}, + {"BPF_T_MONOTONIC", Const, 1, ""}, + {"BPF_T_MONOTONIC_FAST", Const, 1, ""}, + {"BPF_T_NANOTIME", Const, 1, ""}, + {"BPF_T_NANOTIME_FAST", Const, 1, ""}, + {"BPF_T_NANOTIME_MONOTONIC", Const, 1, ""}, + {"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1, ""}, + {"BPF_T_NONE", Const, 1, ""}, + {"BPF_T_NORMAL", Const, 1, ""}, + {"BPF_W", Const, 0, ""}, + {"BPF_X", Const, 0, ""}, + {"BRKINT", Const, 0, ""}, + {"Bind", Func, 0, "func(fd int, sa Sockaddr) (err error)"}, + {"BindToDevice", Func, 0, "func(fd int, device string) (err error)"}, + {"BpfBuflen", Func, 0, ""}, + {"BpfDatalink", Func, 0, ""}, + {"BpfHdr", Type, 0, ""}, + {"BpfHdr.Caplen", Field, 0, ""}, + {"BpfHdr.Datalen", Field, 0, ""}, + {"BpfHdr.Hdrlen", Field, 0, ""}, + {"BpfHdr.Pad_cgo_0", Field, 0, ""}, + {"BpfHdr.Tstamp", Field, 0, ""}, + {"BpfHeadercmpl", Func, 0, ""}, + {"BpfInsn", Type, 0, ""}, + {"BpfInsn.Code", Field, 0, ""}, + {"BpfInsn.Jf", Field, 0, ""}, + {"BpfInsn.Jt", Field, 0, ""}, + {"BpfInsn.K", Field, 0, ""}, + {"BpfInterface", Func, 0, ""}, + {"BpfJump", Func, 0, ""}, + {"BpfProgram", Type, 0, ""}, + {"BpfProgram.Insns", Field, 0, ""}, + {"BpfProgram.Len", Field, 0, ""}, + {"BpfProgram.Pad_cgo_0", Field, 0, ""}, + {"BpfStat", Type, 0, ""}, + {"BpfStat.Capt", Field, 2, ""}, + {"BpfStat.Drop", Field, 0, ""}, + {"BpfStat.Padding", Field, 2, ""}, + {"BpfStat.Recv", Field, 0, ""}, + {"BpfStats", Func, 0, ""}, + {"BpfStmt", Func, 0, ""}, + {"BpfTimeout", Func, 0, ""}, + {"BpfTimeval", Type, 2, ""}, + {"BpfTimeval.Sec", Field, 2, ""}, + {"BpfTimeval.Usec", 
Field, 2, ""}, + {"BpfVersion", Type, 0, ""}, + {"BpfVersion.Major", Field, 0, ""}, + {"BpfVersion.Minor", Field, 0, ""}, + {"BpfZbuf", Type, 0, ""}, + {"BpfZbuf.Bufa", Field, 0, ""}, + {"BpfZbuf.Bufb", Field, 0, ""}, + {"BpfZbuf.Buflen", Field, 0, ""}, + {"BpfZbufHeader", Type, 0, ""}, + {"BpfZbufHeader.Kernel_gen", Field, 0, ""}, + {"BpfZbufHeader.Kernel_len", Field, 0, ""}, + {"BpfZbufHeader.User_gen", Field, 0, ""}, + {"BpfZbufHeader.X_bzh_pad", Field, 0, ""}, + {"ByHandleFileInformation", Type, 0, ""}, + {"ByHandleFileInformation.CreationTime", Field, 0, ""}, + {"ByHandleFileInformation.FileAttributes", Field, 0, ""}, + {"ByHandleFileInformation.FileIndexHigh", Field, 0, ""}, + {"ByHandleFileInformation.FileIndexLow", Field, 0, ""}, + {"ByHandleFileInformation.FileSizeHigh", Field, 0, ""}, + {"ByHandleFileInformation.FileSizeLow", Field, 0, ""}, + {"ByHandleFileInformation.LastAccessTime", Field, 0, ""}, + {"ByHandleFileInformation.LastWriteTime", Field, 0, ""}, + {"ByHandleFileInformation.NumberOfLinks", Field, 0, ""}, + {"ByHandleFileInformation.VolumeSerialNumber", Field, 0, ""}, + {"BytePtrFromString", Func, 1, "func(s string) (*byte, error)"}, + {"ByteSliceFromString", Func, 1, "func(s string) ([]byte, error)"}, + {"CCR0_FLUSH", Const, 1, ""}, + {"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0, ""}, + {"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0, ""}, + {"CERT_CHAIN_POLICY_BASE", Const, 0, ""}, + {"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0, ""}, + {"CERT_CHAIN_POLICY_EV", Const, 0, ""}, + {"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0, ""}, + {"CERT_CHAIN_POLICY_NT_AUTH", Const, 0, ""}, + {"CERT_CHAIN_POLICY_SSL", Const, 0, ""}, + {"CERT_E_CN_NO_MATCH", Const, 0, ""}, + {"CERT_E_EXPIRED", Const, 0, ""}, + {"CERT_E_PURPOSE", Const, 0, ""}, + {"CERT_E_ROLE", Const, 0, ""}, + {"CERT_E_UNTRUSTEDROOT", Const, 0, ""}, + {"CERT_STORE_ADD_ALWAYS", Const, 0, ""}, + {"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0, ""}, + {"CERT_STORE_PROV_MEMORY", Const, 0, ""}, + {"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0, ""}, + {"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0, ""}, + {"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0, ""}, + {"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0, ""}, + {"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0, ""}, + {"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0, ""}, + {"CERT_TRUST_INVALID_EXTENSION", Const, 0, ""}, + {"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0, ""}, + {"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0, ""}, + {"CERT_TRUST_IS_CYCLIC", Const, 0, ""}, + {"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0, ""}, + {"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0, ""}, + {"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0, ""}, + {"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0, ""}, + {"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0, ""}, + {"CERT_TRUST_IS_REVOKED", Const, 0, ""}, + {"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0, ""}, + {"CERT_TRUST_NO_ERROR", Const, 0, ""}, + {"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0, ""}, + {"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0, ""}, + {"CFLUSH", Const, 1, ""}, + {"CLOCAL", Const, 0, ""}, + {"CLONE_CHILD_CLEARTID", Const, 2, ""}, + {"CLONE_CHILD_SETTID", Const, 2, ""}, + {"CLONE_CLEAR_SIGHAND", Const, 20, ""}, + {"CLONE_CSIGNAL", Const, 3, ""}, + {"CLONE_DETACHED", Const, 2, ""}, + {"CLONE_FILES", Const, 2, ""}, + {"CLONE_FS", Const, 2, ""}, + {"CLONE_INTO_CGROUP", Const, 20, ""}, + {"CLONE_IO", Const, 2, ""}, + {"CLONE_NEWCGROUP", Const, 20, ""}, + {"CLONE_NEWIPC", 
Const, 2, ""}, + {"CLONE_NEWNET", Const, 2, ""}, + {"CLONE_NEWNS", Const, 2, ""}, + {"CLONE_NEWPID", Const, 2, ""}, + {"CLONE_NEWTIME", Const, 20, ""}, + {"CLONE_NEWUSER", Const, 2, ""}, + {"CLONE_NEWUTS", Const, 2, ""}, + {"CLONE_PARENT", Const, 2, ""}, + {"CLONE_PARENT_SETTID", Const, 2, ""}, + {"CLONE_PID", Const, 3, ""}, + {"CLONE_PIDFD", Const, 20, ""}, + {"CLONE_PTRACE", Const, 2, ""}, + {"CLONE_SETTLS", Const, 2, ""}, + {"CLONE_SIGHAND", Const, 2, ""}, + {"CLONE_SYSVSEM", Const, 2, ""}, + {"CLONE_THREAD", Const, 2, ""}, + {"CLONE_UNTRACED", Const, 2, ""}, + {"CLONE_VFORK", Const, 2, ""}, + {"CLONE_VM", Const, 2, ""}, + {"CPUID_CFLUSH", Const, 1, ""}, + {"CREAD", Const, 0, ""}, + {"CREATE_ALWAYS", Const, 0, ""}, + {"CREATE_NEW", Const, 0, ""}, + {"CREATE_NEW_PROCESS_GROUP", Const, 1, ""}, + {"CREATE_UNICODE_ENVIRONMENT", Const, 0, ""}, + {"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0, ""}, + {"CRYPT_DELETEKEYSET", Const, 0, ""}, + {"CRYPT_MACHINE_KEYSET", Const, 0, ""}, + {"CRYPT_NEWKEYSET", Const, 0, ""}, + {"CRYPT_SILENT", Const, 0, ""}, + {"CRYPT_VERIFYCONTEXT", Const, 0, ""}, + {"CS5", Const, 0, ""}, + {"CS6", Const, 0, ""}, + {"CS7", Const, 0, ""}, + {"CS8", Const, 0, ""}, + {"CSIZE", Const, 0, ""}, + {"CSTART", Const, 1, ""}, + {"CSTATUS", Const, 1, ""}, + {"CSTOP", Const, 1, ""}, + {"CSTOPB", Const, 0, ""}, + {"CSUSP", Const, 1, ""}, + {"CTL_MAXNAME", Const, 0, ""}, + {"CTL_NET", Const, 0, ""}, + {"CTL_QUERY", Const, 1, ""}, + {"CTRL_BREAK_EVENT", Const, 1, ""}, + {"CTRL_CLOSE_EVENT", Const, 14, ""}, + {"CTRL_C_EVENT", Const, 1, ""}, + {"CTRL_LOGOFF_EVENT", Const, 14, ""}, + {"CTRL_SHUTDOWN_EVENT", Const, 14, ""}, + {"CancelIo", Func, 0, ""}, + {"CancelIoEx", Func, 1, ""}, + {"CertAddCertificateContextToStore", Func, 0, ""}, + {"CertChainContext", Type, 0, ""}, + {"CertChainContext.ChainCount", Field, 0, ""}, + {"CertChainContext.Chains", Field, 0, ""}, + {"CertChainContext.HasRevocationFreshnessTime", Field, 0, ""}, + {"CertChainContext.LowerQualityChainCount", Field, 0, ""}, + {"CertChainContext.LowerQualityChains", Field, 0, ""}, + {"CertChainContext.RevocationFreshnessTime", Field, 0, ""}, + {"CertChainContext.Size", Field, 0, ""}, + {"CertChainContext.TrustStatus", Field, 0, ""}, + {"CertChainElement", Type, 0, ""}, + {"CertChainElement.ApplicationUsage", Field, 0, ""}, + {"CertChainElement.CertContext", Field, 0, ""}, + {"CertChainElement.ExtendedErrorInfo", Field, 0, ""}, + {"CertChainElement.IssuanceUsage", Field, 0, ""}, + {"CertChainElement.RevocationInfo", Field, 0, ""}, + {"CertChainElement.Size", Field, 0, ""}, + {"CertChainElement.TrustStatus", Field, 0, ""}, + {"CertChainPara", Type, 0, ""}, + {"CertChainPara.CacheResync", Field, 0, ""}, + {"CertChainPara.CheckRevocationFreshnessTime", Field, 0, ""}, + {"CertChainPara.RequestedUsage", Field, 0, ""}, + {"CertChainPara.RequstedIssuancePolicy", Field, 0, ""}, + {"CertChainPara.RevocationFreshnessTime", Field, 0, ""}, + {"CertChainPara.Size", Field, 0, ""}, + {"CertChainPara.URLRetrievalTimeout", Field, 0, ""}, + {"CertChainPolicyPara", Type, 0, ""}, + {"CertChainPolicyPara.ExtraPolicyPara", Field, 0, ""}, + {"CertChainPolicyPara.Flags", Field, 0, ""}, + {"CertChainPolicyPara.Size", Field, 0, ""}, + {"CertChainPolicyStatus", Type, 0, ""}, + {"CertChainPolicyStatus.ChainIndex", Field, 0, ""}, + {"CertChainPolicyStatus.ElementIndex", Field, 0, ""}, + {"CertChainPolicyStatus.Error", Field, 0, ""}, + {"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0, ""}, + {"CertChainPolicyStatus.Size", Field, 0, ""}, + 
{"CertCloseStore", Func, 0, ""}, + {"CertContext", Type, 0, ""}, + {"CertContext.CertInfo", Field, 0, ""}, + {"CertContext.EncodedCert", Field, 0, ""}, + {"CertContext.EncodingType", Field, 0, ""}, + {"CertContext.Length", Field, 0, ""}, + {"CertContext.Store", Field, 0, ""}, + {"CertCreateCertificateContext", Func, 0, ""}, + {"CertEnhKeyUsage", Type, 0, ""}, + {"CertEnhKeyUsage.Length", Field, 0, ""}, + {"CertEnhKeyUsage.UsageIdentifiers", Field, 0, ""}, + {"CertEnumCertificatesInStore", Func, 0, ""}, + {"CertFreeCertificateChain", Func, 0, ""}, + {"CertFreeCertificateContext", Func, 0, ""}, + {"CertGetCertificateChain", Func, 0, ""}, + {"CertInfo", Type, 11, ""}, + {"CertOpenStore", Func, 0, ""}, + {"CertOpenSystemStore", Func, 0, ""}, + {"CertRevocationCrlInfo", Type, 11, ""}, + {"CertRevocationInfo", Type, 0, ""}, + {"CertRevocationInfo.CrlInfo", Field, 0, ""}, + {"CertRevocationInfo.FreshnessTime", Field, 0, ""}, + {"CertRevocationInfo.HasFreshnessTime", Field, 0, ""}, + {"CertRevocationInfo.OidSpecificInfo", Field, 0, ""}, + {"CertRevocationInfo.RevocationOid", Field, 0, ""}, + {"CertRevocationInfo.RevocationResult", Field, 0, ""}, + {"CertRevocationInfo.Size", Field, 0, ""}, + {"CertSimpleChain", Type, 0, ""}, + {"CertSimpleChain.Elements", Field, 0, ""}, + {"CertSimpleChain.HasRevocationFreshnessTime", Field, 0, ""}, + {"CertSimpleChain.NumElements", Field, 0, ""}, + {"CertSimpleChain.RevocationFreshnessTime", Field, 0, ""}, + {"CertSimpleChain.Size", Field, 0, ""}, + {"CertSimpleChain.TrustListInfo", Field, 0, ""}, + {"CertSimpleChain.TrustStatus", Field, 0, ""}, + {"CertTrustListInfo", Type, 11, ""}, + {"CertTrustStatus", Type, 0, ""}, + {"CertTrustStatus.ErrorStatus", Field, 0, ""}, + {"CertTrustStatus.InfoStatus", Field, 0, ""}, + {"CertUsageMatch", Type, 0, ""}, + {"CertUsageMatch.Type", Field, 0, ""}, + {"CertUsageMatch.Usage", Field, 0, ""}, + {"CertVerifyCertificateChainPolicy", Func, 0, ""}, + {"Chdir", Func, 0, "func(path string) (err error)"}, + {"CheckBpfVersion", Func, 0, ""}, + {"Chflags", Func, 0, ""}, + {"Chmod", Func, 0, "func(path string, mode uint32) (err error)"}, + {"Chown", Func, 0, "func(path string, uid int, gid int) (err error)"}, + {"Chroot", Func, 0, "func(path string) (err error)"}, + {"Clearenv", Func, 0, "func()"}, + {"Close", Func, 0, "func(fd int) (err error)"}, + {"CloseHandle", Func, 0, ""}, + {"CloseOnExec", Func, 0, "func(fd int)"}, + {"Closesocket", Func, 0, ""}, + {"CmsgLen", Func, 0, "func(datalen int) int"}, + {"CmsgSpace", Func, 0, "func(datalen int) int"}, + {"Cmsghdr", Type, 0, ""}, + {"Cmsghdr.Len", Field, 0, ""}, + {"Cmsghdr.Level", Field, 0, ""}, + {"Cmsghdr.Type", Field, 0, ""}, + {"Cmsghdr.X__cmsg_data", Field, 0, ""}, + {"CommandLineToArgv", Func, 0, ""}, + {"ComputerName", Func, 0, ""}, + {"Conn", Type, 9, ""}, + {"Connect", Func, 0, "func(fd int, sa Sockaddr) (err error)"}, + {"ConnectEx", Func, 1, ""}, + {"ConvertSidToStringSid", Func, 0, ""}, + {"ConvertStringSidToSid", Func, 0, ""}, + {"CopySid", Func, 0, ""}, + {"Creat", Func, 0, "func(path string, mode uint32) (fd int, err error)"}, + {"CreateDirectory", Func, 0, ""}, + {"CreateFile", Func, 0, ""}, + {"CreateFileMapping", Func, 0, ""}, + {"CreateHardLink", Func, 4, ""}, + {"CreateIoCompletionPort", Func, 0, ""}, + {"CreatePipe", Func, 0, ""}, + {"CreateProcess", Func, 0, ""}, + {"CreateProcessAsUser", Func, 10, ""}, + {"CreateSymbolicLink", Func, 4, ""}, + {"CreateToolhelp32Snapshot", Func, 4, ""}, + {"Credential", Type, 0, ""}, + {"Credential.Gid", Field, 0, ""}, + 
{"Credential.Groups", Field, 0, ""}, + {"Credential.NoSetGroups", Field, 9, ""}, + {"Credential.Uid", Field, 0, ""}, + {"CryptAcquireContext", Func, 0, ""}, + {"CryptGenRandom", Func, 0, ""}, + {"CryptReleaseContext", Func, 0, ""}, + {"DIOCBSFLUSH", Const, 1, ""}, + {"DIOCOSFPFLUSH", Const, 1, ""}, + {"DLL", Type, 0, ""}, + {"DLL.Handle", Field, 0, ""}, + {"DLL.Name", Field, 0, ""}, + {"DLLError", Type, 0, ""}, + {"DLLError.Err", Field, 0, ""}, + {"DLLError.Msg", Field, 0, ""}, + {"DLLError.ObjName", Field, 0, ""}, + {"DLT_A429", Const, 0, ""}, + {"DLT_A653_ICM", Const, 0, ""}, + {"DLT_AIRONET_HEADER", Const, 0, ""}, + {"DLT_AOS", Const, 1, ""}, + {"DLT_APPLE_IP_OVER_IEEE1394", Const, 0, ""}, + {"DLT_ARCNET", Const, 0, ""}, + {"DLT_ARCNET_LINUX", Const, 0, ""}, + {"DLT_ATM_CLIP", Const, 0, ""}, + {"DLT_ATM_RFC1483", Const, 0, ""}, + {"DLT_AURORA", Const, 0, ""}, + {"DLT_AX25", Const, 0, ""}, + {"DLT_AX25_KISS", Const, 0, ""}, + {"DLT_BACNET_MS_TP", Const, 0, ""}, + {"DLT_BLUETOOTH_HCI_H4", Const, 0, ""}, + {"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0, ""}, + {"DLT_CAN20B", Const, 0, ""}, + {"DLT_CAN_SOCKETCAN", Const, 1, ""}, + {"DLT_CHAOS", Const, 0, ""}, + {"DLT_CHDLC", Const, 0, ""}, + {"DLT_CISCO_IOS", Const, 0, ""}, + {"DLT_C_HDLC", Const, 0, ""}, + {"DLT_C_HDLC_WITH_DIR", Const, 0, ""}, + {"DLT_DBUS", Const, 1, ""}, + {"DLT_DECT", Const, 1, ""}, + {"DLT_DOCSIS", Const, 0, ""}, + {"DLT_DVB_CI", Const, 1, ""}, + {"DLT_ECONET", Const, 0, ""}, + {"DLT_EN10MB", Const, 0, ""}, + {"DLT_EN3MB", Const, 0, ""}, + {"DLT_ENC", Const, 0, ""}, + {"DLT_ERF", Const, 0, ""}, + {"DLT_ERF_ETH", Const, 0, ""}, + {"DLT_ERF_POS", Const, 0, ""}, + {"DLT_FC_2", Const, 1, ""}, + {"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1, ""}, + {"DLT_FDDI", Const, 0, ""}, + {"DLT_FLEXRAY", Const, 0, ""}, + {"DLT_FRELAY", Const, 0, ""}, + {"DLT_FRELAY_WITH_DIR", Const, 0, ""}, + {"DLT_GCOM_SERIAL", Const, 0, ""}, + {"DLT_GCOM_T1E1", Const, 0, ""}, + {"DLT_GPF_F", Const, 0, ""}, + {"DLT_GPF_T", Const, 0, ""}, + {"DLT_GPRS_LLC", Const, 0, ""}, + {"DLT_GSMTAP_ABIS", Const, 1, ""}, + {"DLT_GSMTAP_UM", Const, 1, ""}, + {"DLT_HDLC", Const, 1, ""}, + {"DLT_HHDLC", Const, 0, ""}, + {"DLT_HIPPI", Const, 1, ""}, + {"DLT_IBM_SN", Const, 0, ""}, + {"DLT_IBM_SP", Const, 0, ""}, + {"DLT_IEEE802", Const, 0, ""}, + {"DLT_IEEE802_11", Const, 0, ""}, + {"DLT_IEEE802_11_RADIO", Const, 0, ""}, + {"DLT_IEEE802_11_RADIO_AVS", Const, 0, ""}, + {"DLT_IEEE802_15_4", Const, 0, ""}, + {"DLT_IEEE802_15_4_LINUX", Const, 0, ""}, + {"DLT_IEEE802_15_4_NOFCS", Const, 1, ""}, + {"DLT_IEEE802_15_4_NONASK_PHY", Const, 0, ""}, + {"DLT_IEEE802_16_MAC_CPS", Const, 0, ""}, + {"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0, ""}, + {"DLT_IPFILTER", Const, 0, ""}, + {"DLT_IPMB", Const, 0, ""}, + {"DLT_IPMB_LINUX", Const, 0, ""}, + {"DLT_IPNET", Const, 1, ""}, + {"DLT_IPOIB", Const, 1, ""}, + {"DLT_IPV4", Const, 1, ""}, + {"DLT_IPV6", Const, 1, ""}, + {"DLT_IP_OVER_FC", Const, 0, ""}, + {"DLT_JUNIPER_ATM1", Const, 0, ""}, + {"DLT_JUNIPER_ATM2", Const, 0, ""}, + {"DLT_JUNIPER_ATM_CEMIC", Const, 1, ""}, + {"DLT_JUNIPER_CHDLC", Const, 0, ""}, + {"DLT_JUNIPER_ES", Const, 0, ""}, + {"DLT_JUNIPER_ETHER", Const, 0, ""}, + {"DLT_JUNIPER_FIBRECHANNEL", Const, 1, ""}, + {"DLT_JUNIPER_FRELAY", Const, 0, ""}, + {"DLT_JUNIPER_GGSN", Const, 0, ""}, + {"DLT_JUNIPER_ISM", Const, 0, ""}, + {"DLT_JUNIPER_MFR", Const, 0, ""}, + {"DLT_JUNIPER_MLFR", Const, 0, ""}, + {"DLT_JUNIPER_MLPPP", Const, 0, ""}, + {"DLT_JUNIPER_MONITOR", Const, 0, ""}, + {"DLT_JUNIPER_PIC_PEER", Const, 0, ""}, + 
{"DLT_JUNIPER_PPP", Const, 0, ""}, + {"DLT_JUNIPER_PPPOE", Const, 0, ""}, + {"DLT_JUNIPER_PPPOE_ATM", Const, 0, ""}, + {"DLT_JUNIPER_SERVICES", Const, 0, ""}, + {"DLT_JUNIPER_SRX_E2E", Const, 1, ""}, + {"DLT_JUNIPER_ST", Const, 0, ""}, + {"DLT_JUNIPER_VP", Const, 0, ""}, + {"DLT_JUNIPER_VS", Const, 1, ""}, + {"DLT_LAPB_WITH_DIR", Const, 0, ""}, + {"DLT_LAPD", Const, 0, ""}, + {"DLT_LIN", Const, 0, ""}, + {"DLT_LINUX_EVDEV", Const, 1, ""}, + {"DLT_LINUX_IRDA", Const, 0, ""}, + {"DLT_LINUX_LAPD", Const, 0, ""}, + {"DLT_LINUX_PPP_WITHDIRECTION", Const, 0, ""}, + {"DLT_LINUX_SLL", Const, 0, ""}, + {"DLT_LOOP", Const, 0, ""}, + {"DLT_LTALK", Const, 0, ""}, + {"DLT_MATCHING_MAX", Const, 1, ""}, + {"DLT_MATCHING_MIN", Const, 1, ""}, + {"DLT_MFR", Const, 0, ""}, + {"DLT_MOST", Const, 0, ""}, + {"DLT_MPEG_2_TS", Const, 1, ""}, + {"DLT_MPLS", Const, 1, ""}, + {"DLT_MTP2", Const, 0, ""}, + {"DLT_MTP2_WITH_PHDR", Const, 0, ""}, + {"DLT_MTP3", Const, 0, ""}, + {"DLT_MUX27010", Const, 1, ""}, + {"DLT_NETANALYZER", Const, 1, ""}, + {"DLT_NETANALYZER_TRANSPARENT", Const, 1, ""}, + {"DLT_NFC_LLCP", Const, 1, ""}, + {"DLT_NFLOG", Const, 1, ""}, + {"DLT_NG40", Const, 1, ""}, + {"DLT_NULL", Const, 0, ""}, + {"DLT_PCI_EXP", Const, 0, ""}, + {"DLT_PFLOG", Const, 0, ""}, + {"DLT_PFSYNC", Const, 0, ""}, + {"DLT_PPI", Const, 0, ""}, + {"DLT_PPP", Const, 0, ""}, + {"DLT_PPP_BSDOS", Const, 0, ""}, + {"DLT_PPP_ETHER", Const, 0, ""}, + {"DLT_PPP_PPPD", Const, 0, ""}, + {"DLT_PPP_SERIAL", Const, 0, ""}, + {"DLT_PPP_WITH_DIR", Const, 0, ""}, + {"DLT_PPP_WITH_DIRECTION", Const, 0, ""}, + {"DLT_PRISM_HEADER", Const, 0, ""}, + {"DLT_PRONET", Const, 0, ""}, + {"DLT_RAIF1", Const, 0, ""}, + {"DLT_RAW", Const, 0, ""}, + {"DLT_RAWAF_MASK", Const, 1, ""}, + {"DLT_RIO", Const, 0, ""}, + {"DLT_SCCP", Const, 0, ""}, + {"DLT_SITA", Const, 0, ""}, + {"DLT_SLIP", Const, 0, ""}, + {"DLT_SLIP_BSDOS", Const, 0, ""}, + {"DLT_STANAG_5066_D_PDU", Const, 1, ""}, + {"DLT_SUNATM", Const, 0, ""}, + {"DLT_SYMANTEC_FIREWALL", Const, 0, ""}, + {"DLT_TZSP", Const, 0, ""}, + {"DLT_USB", Const, 0, ""}, + {"DLT_USB_LINUX", Const, 0, ""}, + {"DLT_USB_LINUX_MMAPPED", Const, 1, ""}, + {"DLT_USER0", Const, 0, ""}, + {"DLT_USER1", Const, 0, ""}, + {"DLT_USER10", Const, 0, ""}, + {"DLT_USER11", Const, 0, ""}, + {"DLT_USER12", Const, 0, ""}, + {"DLT_USER13", Const, 0, ""}, + {"DLT_USER14", Const, 0, ""}, + {"DLT_USER15", Const, 0, ""}, + {"DLT_USER2", Const, 0, ""}, + {"DLT_USER3", Const, 0, ""}, + {"DLT_USER4", Const, 0, ""}, + {"DLT_USER5", Const, 0, ""}, + {"DLT_USER6", Const, 0, ""}, + {"DLT_USER7", Const, 0, ""}, + {"DLT_USER8", Const, 0, ""}, + {"DLT_USER9", Const, 0, ""}, + {"DLT_WIHART", Const, 1, ""}, + {"DLT_X2E_SERIAL", Const, 0, ""}, + {"DLT_X2E_XORAYA", Const, 0, ""}, + {"DNSMXData", Type, 0, ""}, + {"DNSMXData.NameExchange", Field, 0, ""}, + {"DNSMXData.Pad", Field, 0, ""}, + {"DNSMXData.Preference", Field, 0, ""}, + {"DNSPTRData", Type, 0, ""}, + {"DNSPTRData.Host", Field, 0, ""}, + {"DNSRecord", Type, 0, ""}, + {"DNSRecord.Data", Field, 0, ""}, + {"DNSRecord.Dw", Field, 0, ""}, + {"DNSRecord.Length", Field, 0, ""}, + {"DNSRecord.Name", Field, 0, ""}, + {"DNSRecord.Next", Field, 0, ""}, + {"DNSRecord.Reserved", Field, 0, ""}, + {"DNSRecord.Ttl", Field, 0, ""}, + {"DNSRecord.Type", Field, 0, ""}, + {"DNSSRVData", Type, 0, ""}, + {"DNSSRVData.Pad", Field, 0, ""}, + {"DNSSRVData.Port", Field, 0, ""}, + {"DNSSRVData.Priority", Field, 0, ""}, + {"DNSSRVData.Target", Field, 0, ""}, + {"DNSSRVData.Weight", Field, 0, ""}, + {"DNSTXTData", Type, 0, 
""}, + {"DNSTXTData.StringArray", Field, 0, ""}, + {"DNSTXTData.StringCount", Field, 0, ""}, + {"DNS_INFO_NO_RECORDS", Const, 4, ""}, + {"DNS_TYPE_A", Const, 0, ""}, + {"DNS_TYPE_A6", Const, 0, ""}, + {"DNS_TYPE_AAAA", Const, 0, ""}, + {"DNS_TYPE_ADDRS", Const, 0, ""}, + {"DNS_TYPE_AFSDB", Const, 0, ""}, + {"DNS_TYPE_ALL", Const, 0, ""}, + {"DNS_TYPE_ANY", Const, 0, ""}, + {"DNS_TYPE_ATMA", Const, 0, ""}, + {"DNS_TYPE_AXFR", Const, 0, ""}, + {"DNS_TYPE_CERT", Const, 0, ""}, + {"DNS_TYPE_CNAME", Const, 0, ""}, + {"DNS_TYPE_DHCID", Const, 0, ""}, + {"DNS_TYPE_DNAME", Const, 0, ""}, + {"DNS_TYPE_DNSKEY", Const, 0, ""}, + {"DNS_TYPE_DS", Const, 0, ""}, + {"DNS_TYPE_EID", Const, 0, ""}, + {"DNS_TYPE_GID", Const, 0, ""}, + {"DNS_TYPE_GPOS", Const, 0, ""}, + {"DNS_TYPE_HINFO", Const, 0, ""}, + {"DNS_TYPE_ISDN", Const, 0, ""}, + {"DNS_TYPE_IXFR", Const, 0, ""}, + {"DNS_TYPE_KEY", Const, 0, ""}, + {"DNS_TYPE_KX", Const, 0, ""}, + {"DNS_TYPE_LOC", Const, 0, ""}, + {"DNS_TYPE_MAILA", Const, 0, ""}, + {"DNS_TYPE_MAILB", Const, 0, ""}, + {"DNS_TYPE_MB", Const, 0, ""}, + {"DNS_TYPE_MD", Const, 0, ""}, + {"DNS_TYPE_MF", Const, 0, ""}, + {"DNS_TYPE_MG", Const, 0, ""}, + {"DNS_TYPE_MINFO", Const, 0, ""}, + {"DNS_TYPE_MR", Const, 0, ""}, + {"DNS_TYPE_MX", Const, 0, ""}, + {"DNS_TYPE_NAPTR", Const, 0, ""}, + {"DNS_TYPE_NBSTAT", Const, 0, ""}, + {"DNS_TYPE_NIMLOC", Const, 0, ""}, + {"DNS_TYPE_NS", Const, 0, ""}, + {"DNS_TYPE_NSAP", Const, 0, ""}, + {"DNS_TYPE_NSAPPTR", Const, 0, ""}, + {"DNS_TYPE_NSEC", Const, 0, ""}, + {"DNS_TYPE_NULL", Const, 0, ""}, + {"DNS_TYPE_NXT", Const, 0, ""}, + {"DNS_TYPE_OPT", Const, 0, ""}, + {"DNS_TYPE_PTR", Const, 0, ""}, + {"DNS_TYPE_PX", Const, 0, ""}, + {"DNS_TYPE_RP", Const, 0, ""}, + {"DNS_TYPE_RRSIG", Const, 0, ""}, + {"DNS_TYPE_RT", Const, 0, ""}, + {"DNS_TYPE_SIG", Const, 0, ""}, + {"DNS_TYPE_SINK", Const, 0, ""}, + {"DNS_TYPE_SOA", Const, 0, ""}, + {"DNS_TYPE_SRV", Const, 0, ""}, + {"DNS_TYPE_TEXT", Const, 0, ""}, + {"DNS_TYPE_TKEY", Const, 0, ""}, + {"DNS_TYPE_TSIG", Const, 0, ""}, + {"DNS_TYPE_UID", Const, 0, ""}, + {"DNS_TYPE_UINFO", Const, 0, ""}, + {"DNS_TYPE_UNSPEC", Const, 0, ""}, + {"DNS_TYPE_WINS", Const, 0, ""}, + {"DNS_TYPE_WINSR", Const, 0, ""}, + {"DNS_TYPE_WKS", Const, 0, ""}, + {"DNS_TYPE_X25", Const, 0, ""}, + {"DT_BLK", Const, 0, ""}, + {"DT_CHR", Const, 0, ""}, + {"DT_DIR", Const, 0, ""}, + {"DT_FIFO", Const, 0, ""}, + {"DT_LNK", Const, 0, ""}, + {"DT_REG", Const, 0, ""}, + {"DT_SOCK", Const, 0, ""}, + {"DT_UNKNOWN", Const, 0, ""}, + {"DT_WHT", Const, 0, ""}, + {"DUPLICATE_CLOSE_SOURCE", Const, 0, ""}, + {"DUPLICATE_SAME_ACCESS", Const, 0, ""}, + {"DeleteFile", Func, 0, ""}, + {"DetachLsf", Func, 0, "func(fd int) error"}, + {"DeviceIoControl", Func, 4, ""}, + {"Dirent", Type, 0, ""}, + {"Dirent.Fileno", Field, 0, ""}, + {"Dirent.Ino", Field, 0, ""}, + {"Dirent.Name", Field, 0, ""}, + {"Dirent.Namlen", Field, 0, ""}, + {"Dirent.Off", Field, 0, ""}, + {"Dirent.Pad0", Field, 12, ""}, + {"Dirent.Pad1", Field, 12, ""}, + {"Dirent.Pad_cgo_0", Field, 0, ""}, + {"Dirent.Reclen", Field, 0, ""}, + {"Dirent.Seekoff", Field, 0, ""}, + {"Dirent.Type", Field, 0, ""}, + {"Dirent.X__d_padding", Field, 3, ""}, + {"DnsNameCompare", Func, 4, ""}, + {"DnsQuery", Func, 0, ""}, + {"DnsRecordListFree", Func, 0, ""}, + {"DnsSectionAdditional", Const, 4, ""}, + {"DnsSectionAnswer", Const, 4, ""}, + {"DnsSectionAuthority", Const, 4, ""}, + {"DnsSectionQuestion", Const, 4, ""}, + {"Dup", Func, 0, "func(oldfd int) (fd int, err error)"}, + {"Dup2", Func, 0, "func(oldfd int, newfd 
int) (err error)"}, + {"Dup3", Func, 2, "func(oldfd int, newfd int, flags int) (err error)"}, + {"DuplicateHandle", Func, 0, ""}, + {"E2BIG", Const, 0, ""}, + {"EACCES", Const, 0, ""}, + {"EADDRINUSE", Const, 0, ""}, + {"EADDRNOTAVAIL", Const, 0, ""}, + {"EADV", Const, 0, ""}, + {"EAFNOSUPPORT", Const, 0, ""}, + {"EAGAIN", Const, 0, ""}, + {"EALREADY", Const, 0, ""}, + {"EAUTH", Const, 0, ""}, + {"EBADARCH", Const, 0, ""}, + {"EBADE", Const, 0, ""}, + {"EBADEXEC", Const, 0, ""}, + {"EBADF", Const, 0, ""}, + {"EBADFD", Const, 0, ""}, + {"EBADMACHO", Const, 0, ""}, + {"EBADMSG", Const, 0, ""}, + {"EBADR", Const, 0, ""}, + {"EBADRPC", Const, 0, ""}, + {"EBADRQC", Const, 0, ""}, + {"EBADSLT", Const, 0, ""}, + {"EBFONT", Const, 0, ""}, + {"EBUSY", Const, 0, ""}, + {"ECANCELED", Const, 0, ""}, + {"ECAPMODE", Const, 1, ""}, + {"ECHILD", Const, 0, ""}, + {"ECHO", Const, 0, ""}, + {"ECHOCTL", Const, 0, ""}, + {"ECHOE", Const, 0, ""}, + {"ECHOK", Const, 0, ""}, + {"ECHOKE", Const, 0, ""}, + {"ECHONL", Const, 0, ""}, + {"ECHOPRT", Const, 0, ""}, + {"ECHRNG", Const, 0, ""}, + {"ECOMM", Const, 0, ""}, + {"ECONNABORTED", Const, 0, ""}, + {"ECONNREFUSED", Const, 0, ""}, + {"ECONNRESET", Const, 0, ""}, + {"EDEADLK", Const, 0, ""}, + {"EDEADLOCK", Const, 0, ""}, + {"EDESTADDRREQ", Const, 0, ""}, + {"EDEVERR", Const, 0, ""}, + {"EDOM", Const, 0, ""}, + {"EDOOFUS", Const, 0, ""}, + {"EDOTDOT", Const, 0, ""}, + {"EDQUOT", Const, 0, ""}, + {"EEXIST", Const, 0, ""}, + {"EFAULT", Const, 0, ""}, + {"EFBIG", Const, 0, ""}, + {"EFER_LMA", Const, 1, ""}, + {"EFER_LME", Const, 1, ""}, + {"EFER_NXE", Const, 1, ""}, + {"EFER_SCE", Const, 1, ""}, + {"EFTYPE", Const, 0, ""}, + {"EHOSTDOWN", Const, 0, ""}, + {"EHOSTUNREACH", Const, 0, ""}, + {"EHWPOISON", Const, 0, ""}, + {"EIDRM", Const, 0, ""}, + {"EILSEQ", Const, 0, ""}, + {"EINPROGRESS", Const, 0, ""}, + {"EINTR", Const, 0, ""}, + {"EINVAL", Const, 0, ""}, + {"EIO", Const, 0, ""}, + {"EIPSEC", Const, 1, ""}, + {"EISCONN", Const, 0, ""}, + {"EISDIR", Const, 0, ""}, + {"EISNAM", Const, 0, ""}, + {"EKEYEXPIRED", Const, 0, ""}, + {"EKEYREJECTED", Const, 0, ""}, + {"EKEYREVOKED", Const, 0, ""}, + {"EL2HLT", Const, 0, ""}, + {"EL2NSYNC", Const, 0, ""}, + {"EL3HLT", Const, 0, ""}, + {"EL3RST", Const, 0, ""}, + {"ELAST", Const, 0, ""}, + {"ELF_NGREG", Const, 0, ""}, + {"ELF_PRARGSZ", Const, 0, ""}, + {"ELIBACC", Const, 0, ""}, + {"ELIBBAD", Const, 0, ""}, + {"ELIBEXEC", Const, 0, ""}, + {"ELIBMAX", Const, 0, ""}, + {"ELIBSCN", Const, 0, ""}, + {"ELNRNG", Const, 0, ""}, + {"ELOOP", Const, 0, ""}, + {"EMEDIUMTYPE", Const, 0, ""}, + {"EMFILE", Const, 0, ""}, + {"EMLINK", Const, 0, ""}, + {"EMSGSIZE", Const, 0, ""}, + {"EMT_TAGOVF", Const, 1, ""}, + {"EMULTIHOP", Const, 0, ""}, + {"EMUL_ENABLED", Const, 1, ""}, + {"EMUL_LINUX", Const, 1, ""}, + {"EMUL_LINUX32", Const, 1, ""}, + {"EMUL_MAXID", Const, 1, ""}, + {"EMUL_NATIVE", Const, 1, ""}, + {"ENAMETOOLONG", Const, 0, ""}, + {"ENAVAIL", Const, 0, ""}, + {"ENDRUNDISC", Const, 1, ""}, + {"ENEEDAUTH", Const, 0, ""}, + {"ENETDOWN", Const, 0, ""}, + {"ENETRESET", Const, 0, ""}, + {"ENETUNREACH", Const, 0, ""}, + {"ENFILE", Const, 0, ""}, + {"ENOANO", Const, 0, ""}, + {"ENOATTR", Const, 0, ""}, + {"ENOBUFS", Const, 0, ""}, + {"ENOCSI", Const, 0, ""}, + {"ENODATA", Const, 0, ""}, + {"ENODEV", Const, 0, ""}, + {"ENOENT", Const, 0, ""}, + {"ENOEXEC", Const, 0, ""}, + {"ENOKEY", Const, 0, ""}, + {"ENOLCK", Const, 0, ""}, + {"ENOLINK", Const, 0, ""}, + {"ENOMEDIUM", Const, 0, ""}, + {"ENOMEM", Const, 0, ""}, + {"ENOMSG", Const, 0, ""}, + 
{"ENONET", Const, 0, ""}, + {"ENOPKG", Const, 0, ""}, + {"ENOPOLICY", Const, 0, ""}, + {"ENOPROTOOPT", Const, 0, ""}, + {"ENOSPC", Const, 0, ""}, + {"ENOSR", Const, 0, ""}, + {"ENOSTR", Const, 0, ""}, + {"ENOSYS", Const, 0, ""}, + {"ENOTBLK", Const, 0, ""}, + {"ENOTCAPABLE", Const, 0, ""}, + {"ENOTCONN", Const, 0, ""}, + {"ENOTDIR", Const, 0, ""}, + {"ENOTEMPTY", Const, 0, ""}, + {"ENOTNAM", Const, 0, ""}, + {"ENOTRECOVERABLE", Const, 0, ""}, + {"ENOTSOCK", Const, 0, ""}, + {"ENOTSUP", Const, 0, ""}, + {"ENOTTY", Const, 0, ""}, + {"ENOTUNIQ", Const, 0, ""}, + {"ENXIO", Const, 0, ""}, + {"EN_SW_CTL_INF", Const, 1, ""}, + {"EN_SW_CTL_PREC", Const, 1, ""}, + {"EN_SW_CTL_ROUND", Const, 1, ""}, + {"EN_SW_DATACHAIN", Const, 1, ""}, + {"EN_SW_DENORM", Const, 1, ""}, + {"EN_SW_INVOP", Const, 1, ""}, + {"EN_SW_OVERFLOW", Const, 1, ""}, + {"EN_SW_PRECLOSS", Const, 1, ""}, + {"EN_SW_UNDERFLOW", Const, 1, ""}, + {"EN_SW_ZERODIV", Const, 1, ""}, + {"EOPNOTSUPP", Const, 0, ""}, + {"EOVERFLOW", Const, 0, ""}, + {"EOWNERDEAD", Const, 0, ""}, + {"EPERM", Const, 0, ""}, + {"EPFNOSUPPORT", Const, 0, ""}, + {"EPIPE", Const, 0, ""}, + {"EPOLLERR", Const, 0, ""}, + {"EPOLLET", Const, 0, ""}, + {"EPOLLHUP", Const, 0, ""}, + {"EPOLLIN", Const, 0, ""}, + {"EPOLLMSG", Const, 0, ""}, + {"EPOLLONESHOT", Const, 0, ""}, + {"EPOLLOUT", Const, 0, ""}, + {"EPOLLPRI", Const, 0, ""}, + {"EPOLLRDBAND", Const, 0, ""}, + {"EPOLLRDHUP", Const, 0, ""}, + {"EPOLLRDNORM", Const, 0, ""}, + {"EPOLLWRBAND", Const, 0, ""}, + {"EPOLLWRNORM", Const, 0, ""}, + {"EPOLL_CLOEXEC", Const, 0, ""}, + {"EPOLL_CTL_ADD", Const, 0, ""}, + {"EPOLL_CTL_DEL", Const, 0, ""}, + {"EPOLL_CTL_MOD", Const, 0, ""}, + {"EPOLL_NONBLOCK", Const, 0, ""}, + {"EPROCLIM", Const, 0, ""}, + {"EPROCUNAVAIL", Const, 0, ""}, + {"EPROGMISMATCH", Const, 0, ""}, + {"EPROGUNAVAIL", Const, 0, ""}, + {"EPROTO", Const, 0, ""}, + {"EPROTONOSUPPORT", Const, 0, ""}, + {"EPROTOTYPE", Const, 0, ""}, + {"EPWROFF", Const, 0, ""}, + {"EQFULL", Const, 16, ""}, + {"ERANGE", Const, 0, ""}, + {"EREMCHG", Const, 0, ""}, + {"EREMOTE", Const, 0, ""}, + {"EREMOTEIO", Const, 0, ""}, + {"ERESTART", Const, 0, ""}, + {"ERFKILL", Const, 0, ""}, + {"EROFS", Const, 0, ""}, + {"ERPCMISMATCH", Const, 0, ""}, + {"ERROR_ACCESS_DENIED", Const, 0, ""}, + {"ERROR_ALREADY_EXISTS", Const, 0, ""}, + {"ERROR_BROKEN_PIPE", Const, 0, ""}, + {"ERROR_BUFFER_OVERFLOW", Const, 0, ""}, + {"ERROR_DIR_NOT_EMPTY", Const, 8, ""}, + {"ERROR_ENVVAR_NOT_FOUND", Const, 0, ""}, + {"ERROR_FILE_EXISTS", Const, 0, ""}, + {"ERROR_FILE_NOT_FOUND", Const, 0, ""}, + {"ERROR_HANDLE_EOF", Const, 2, ""}, + {"ERROR_INSUFFICIENT_BUFFER", Const, 0, ""}, + {"ERROR_IO_PENDING", Const, 0, ""}, + {"ERROR_MOD_NOT_FOUND", Const, 0, ""}, + {"ERROR_MORE_DATA", Const, 3, ""}, + {"ERROR_NETNAME_DELETED", Const, 3, ""}, + {"ERROR_NOT_FOUND", Const, 1, ""}, + {"ERROR_NO_MORE_FILES", Const, 0, ""}, + {"ERROR_OPERATION_ABORTED", Const, 0, ""}, + {"ERROR_PATH_NOT_FOUND", Const, 0, ""}, + {"ERROR_PRIVILEGE_NOT_HELD", Const, 4, ""}, + {"ERROR_PROC_NOT_FOUND", Const, 0, ""}, + {"ESHLIBVERS", Const, 0, ""}, + {"ESHUTDOWN", Const, 0, ""}, + {"ESOCKTNOSUPPORT", Const, 0, ""}, + {"ESPIPE", Const, 0, ""}, + {"ESRCH", Const, 0, ""}, + {"ESRMNT", Const, 0, ""}, + {"ESTALE", Const, 0, ""}, + {"ESTRPIPE", Const, 0, ""}, + {"ETHERCAP_JUMBO_MTU", Const, 1, ""}, + {"ETHERCAP_VLAN_HWTAGGING", Const, 1, ""}, + {"ETHERCAP_VLAN_MTU", Const, 1, ""}, + {"ETHERMIN", Const, 1, ""}, + {"ETHERMTU", Const, 1, ""}, + {"ETHERMTU_JUMBO", Const, 1, ""}, + {"ETHERTYPE_8023", Const, 
1, ""}, + {"ETHERTYPE_AARP", Const, 1, ""}, + {"ETHERTYPE_ACCTON", Const, 1, ""}, + {"ETHERTYPE_AEONIC", Const, 1, ""}, + {"ETHERTYPE_ALPHA", Const, 1, ""}, + {"ETHERTYPE_AMBER", Const, 1, ""}, + {"ETHERTYPE_AMOEBA", Const, 1, ""}, + {"ETHERTYPE_AOE", Const, 1, ""}, + {"ETHERTYPE_APOLLO", Const, 1, ""}, + {"ETHERTYPE_APOLLODOMAIN", Const, 1, ""}, + {"ETHERTYPE_APPLETALK", Const, 1, ""}, + {"ETHERTYPE_APPLITEK", Const, 1, ""}, + {"ETHERTYPE_ARGONAUT", Const, 1, ""}, + {"ETHERTYPE_ARP", Const, 1, ""}, + {"ETHERTYPE_AT", Const, 1, ""}, + {"ETHERTYPE_ATALK", Const, 1, ""}, + {"ETHERTYPE_ATOMIC", Const, 1, ""}, + {"ETHERTYPE_ATT", Const, 1, ""}, + {"ETHERTYPE_ATTSTANFORD", Const, 1, ""}, + {"ETHERTYPE_AUTOPHON", Const, 1, ""}, + {"ETHERTYPE_AXIS", Const, 1, ""}, + {"ETHERTYPE_BCLOOP", Const, 1, ""}, + {"ETHERTYPE_BOFL", Const, 1, ""}, + {"ETHERTYPE_CABLETRON", Const, 1, ""}, + {"ETHERTYPE_CHAOS", Const, 1, ""}, + {"ETHERTYPE_COMDESIGN", Const, 1, ""}, + {"ETHERTYPE_COMPUGRAPHIC", Const, 1, ""}, + {"ETHERTYPE_COUNTERPOINT", Const, 1, ""}, + {"ETHERTYPE_CRONUS", Const, 1, ""}, + {"ETHERTYPE_CRONUSVLN", Const, 1, ""}, + {"ETHERTYPE_DCA", Const, 1, ""}, + {"ETHERTYPE_DDE", Const, 1, ""}, + {"ETHERTYPE_DEBNI", Const, 1, ""}, + {"ETHERTYPE_DECAM", Const, 1, ""}, + {"ETHERTYPE_DECCUST", Const, 1, ""}, + {"ETHERTYPE_DECDIAG", Const, 1, ""}, + {"ETHERTYPE_DECDNS", Const, 1, ""}, + {"ETHERTYPE_DECDTS", Const, 1, ""}, + {"ETHERTYPE_DECEXPER", Const, 1, ""}, + {"ETHERTYPE_DECLAST", Const, 1, ""}, + {"ETHERTYPE_DECLTM", Const, 1, ""}, + {"ETHERTYPE_DECMUMPS", Const, 1, ""}, + {"ETHERTYPE_DECNETBIOS", Const, 1, ""}, + {"ETHERTYPE_DELTACON", Const, 1, ""}, + {"ETHERTYPE_DIDDLE", Const, 1, ""}, + {"ETHERTYPE_DLOG1", Const, 1, ""}, + {"ETHERTYPE_DLOG2", Const, 1, ""}, + {"ETHERTYPE_DN", Const, 1, ""}, + {"ETHERTYPE_DOGFIGHT", Const, 1, ""}, + {"ETHERTYPE_DSMD", Const, 1, ""}, + {"ETHERTYPE_ECMA", Const, 1, ""}, + {"ETHERTYPE_ENCRYPT", Const, 1, ""}, + {"ETHERTYPE_ES", Const, 1, ""}, + {"ETHERTYPE_EXCELAN", Const, 1, ""}, + {"ETHERTYPE_EXPERDATA", Const, 1, ""}, + {"ETHERTYPE_FLIP", Const, 1, ""}, + {"ETHERTYPE_FLOWCONTROL", Const, 1, ""}, + {"ETHERTYPE_FRARP", Const, 1, ""}, + {"ETHERTYPE_GENDYN", Const, 1, ""}, + {"ETHERTYPE_HAYES", Const, 1, ""}, + {"ETHERTYPE_HIPPI_FP", Const, 1, ""}, + {"ETHERTYPE_HITACHI", Const, 1, ""}, + {"ETHERTYPE_HP", Const, 1, ""}, + {"ETHERTYPE_IEEEPUP", Const, 1, ""}, + {"ETHERTYPE_IEEEPUPAT", Const, 1, ""}, + {"ETHERTYPE_IMLBL", Const, 1, ""}, + {"ETHERTYPE_IMLBLDIAG", Const, 1, ""}, + {"ETHERTYPE_IP", Const, 1, ""}, + {"ETHERTYPE_IPAS", Const, 1, ""}, + {"ETHERTYPE_IPV6", Const, 1, ""}, + {"ETHERTYPE_IPX", Const, 1, ""}, + {"ETHERTYPE_IPXNEW", Const, 1, ""}, + {"ETHERTYPE_KALPANA", Const, 1, ""}, + {"ETHERTYPE_LANBRIDGE", Const, 1, ""}, + {"ETHERTYPE_LANPROBE", Const, 1, ""}, + {"ETHERTYPE_LAT", Const, 1, ""}, + {"ETHERTYPE_LBACK", Const, 1, ""}, + {"ETHERTYPE_LITTLE", Const, 1, ""}, + {"ETHERTYPE_LLDP", Const, 1, ""}, + {"ETHERTYPE_LOGICRAFT", Const, 1, ""}, + {"ETHERTYPE_LOOPBACK", Const, 1, ""}, + {"ETHERTYPE_MATRA", Const, 1, ""}, + {"ETHERTYPE_MAX", Const, 1, ""}, + {"ETHERTYPE_MERIT", Const, 1, ""}, + {"ETHERTYPE_MICP", Const, 1, ""}, + {"ETHERTYPE_MOPDL", Const, 1, ""}, + {"ETHERTYPE_MOPRC", Const, 1, ""}, + {"ETHERTYPE_MOTOROLA", Const, 1, ""}, + {"ETHERTYPE_MPLS", Const, 1, ""}, + {"ETHERTYPE_MPLS_MCAST", Const, 1, ""}, + {"ETHERTYPE_MUMPS", Const, 1, ""}, + {"ETHERTYPE_NBPCC", Const, 1, ""}, + {"ETHERTYPE_NBPCLAIM", Const, 1, ""}, + {"ETHERTYPE_NBPCLREQ", Const, 1, ""}, 
+ {"ETHERTYPE_NBPCLRSP", Const, 1, ""}, + {"ETHERTYPE_NBPCREQ", Const, 1, ""}, + {"ETHERTYPE_NBPCRSP", Const, 1, ""}, + {"ETHERTYPE_NBPDG", Const, 1, ""}, + {"ETHERTYPE_NBPDGB", Const, 1, ""}, + {"ETHERTYPE_NBPDLTE", Const, 1, ""}, + {"ETHERTYPE_NBPRAR", Const, 1, ""}, + {"ETHERTYPE_NBPRAS", Const, 1, ""}, + {"ETHERTYPE_NBPRST", Const, 1, ""}, + {"ETHERTYPE_NBPSCD", Const, 1, ""}, + {"ETHERTYPE_NBPVCD", Const, 1, ""}, + {"ETHERTYPE_NBS", Const, 1, ""}, + {"ETHERTYPE_NCD", Const, 1, ""}, + {"ETHERTYPE_NESTAR", Const, 1, ""}, + {"ETHERTYPE_NETBEUI", Const, 1, ""}, + {"ETHERTYPE_NOVELL", Const, 1, ""}, + {"ETHERTYPE_NS", Const, 1, ""}, + {"ETHERTYPE_NSAT", Const, 1, ""}, + {"ETHERTYPE_NSCOMPAT", Const, 1, ""}, + {"ETHERTYPE_NTRAILER", Const, 1, ""}, + {"ETHERTYPE_OS9", Const, 1, ""}, + {"ETHERTYPE_OS9NET", Const, 1, ""}, + {"ETHERTYPE_PACER", Const, 1, ""}, + {"ETHERTYPE_PAE", Const, 1, ""}, + {"ETHERTYPE_PCS", Const, 1, ""}, + {"ETHERTYPE_PLANNING", Const, 1, ""}, + {"ETHERTYPE_PPP", Const, 1, ""}, + {"ETHERTYPE_PPPOE", Const, 1, ""}, + {"ETHERTYPE_PPPOEDISC", Const, 1, ""}, + {"ETHERTYPE_PRIMENTS", Const, 1, ""}, + {"ETHERTYPE_PUP", Const, 1, ""}, + {"ETHERTYPE_PUPAT", Const, 1, ""}, + {"ETHERTYPE_QINQ", Const, 1, ""}, + {"ETHERTYPE_RACAL", Const, 1, ""}, + {"ETHERTYPE_RATIONAL", Const, 1, ""}, + {"ETHERTYPE_RAWFR", Const, 1, ""}, + {"ETHERTYPE_RCL", Const, 1, ""}, + {"ETHERTYPE_RDP", Const, 1, ""}, + {"ETHERTYPE_RETIX", Const, 1, ""}, + {"ETHERTYPE_REVARP", Const, 1, ""}, + {"ETHERTYPE_SCA", Const, 1, ""}, + {"ETHERTYPE_SECTRA", Const, 1, ""}, + {"ETHERTYPE_SECUREDATA", Const, 1, ""}, + {"ETHERTYPE_SGITW", Const, 1, ""}, + {"ETHERTYPE_SG_BOUNCE", Const, 1, ""}, + {"ETHERTYPE_SG_DIAG", Const, 1, ""}, + {"ETHERTYPE_SG_NETGAMES", Const, 1, ""}, + {"ETHERTYPE_SG_RESV", Const, 1, ""}, + {"ETHERTYPE_SIMNET", Const, 1, ""}, + {"ETHERTYPE_SLOW", Const, 1, ""}, + {"ETHERTYPE_SLOWPROTOCOLS", Const, 1, ""}, + {"ETHERTYPE_SNA", Const, 1, ""}, + {"ETHERTYPE_SNMP", Const, 1, ""}, + {"ETHERTYPE_SONIX", Const, 1, ""}, + {"ETHERTYPE_SPIDER", Const, 1, ""}, + {"ETHERTYPE_SPRITE", Const, 1, ""}, + {"ETHERTYPE_STP", Const, 1, ""}, + {"ETHERTYPE_TALARIS", Const, 1, ""}, + {"ETHERTYPE_TALARISMC", Const, 1, ""}, + {"ETHERTYPE_TCPCOMP", Const, 1, ""}, + {"ETHERTYPE_TCPSM", Const, 1, ""}, + {"ETHERTYPE_TEC", Const, 1, ""}, + {"ETHERTYPE_TIGAN", Const, 1, ""}, + {"ETHERTYPE_TRAIL", Const, 1, ""}, + {"ETHERTYPE_TRANSETHER", Const, 1, ""}, + {"ETHERTYPE_TYMSHARE", Const, 1, ""}, + {"ETHERTYPE_UBBST", Const, 1, ""}, + {"ETHERTYPE_UBDEBUG", Const, 1, ""}, + {"ETHERTYPE_UBDIAGLOOP", Const, 1, ""}, + {"ETHERTYPE_UBDL", Const, 1, ""}, + {"ETHERTYPE_UBNIU", Const, 1, ""}, + {"ETHERTYPE_UBNMC", Const, 1, ""}, + {"ETHERTYPE_VALID", Const, 1, ""}, + {"ETHERTYPE_VARIAN", Const, 1, ""}, + {"ETHERTYPE_VAXELN", Const, 1, ""}, + {"ETHERTYPE_VEECO", Const, 1, ""}, + {"ETHERTYPE_VEXP", Const, 1, ""}, + {"ETHERTYPE_VGLAB", Const, 1, ""}, + {"ETHERTYPE_VINES", Const, 1, ""}, + {"ETHERTYPE_VINESECHO", Const, 1, ""}, + {"ETHERTYPE_VINESLOOP", Const, 1, ""}, + {"ETHERTYPE_VITAL", Const, 1, ""}, + {"ETHERTYPE_VLAN", Const, 1, ""}, + {"ETHERTYPE_VLTLMAN", Const, 1, ""}, + {"ETHERTYPE_VPROD", Const, 1, ""}, + {"ETHERTYPE_VURESERVED", Const, 1, ""}, + {"ETHERTYPE_WATERLOO", Const, 1, ""}, + {"ETHERTYPE_WELLFLEET", Const, 1, ""}, + {"ETHERTYPE_X25", Const, 1, ""}, + {"ETHERTYPE_X75", Const, 1, ""}, + {"ETHERTYPE_XNSSM", Const, 1, ""}, + {"ETHERTYPE_XTP", Const, 1, ""}, + {"ETHER_ADDR_LEN", Const, 1, ""}, + {"ETHER_ALIGN", Const, 1, ""}, + 
{"ETHER_CRC_LEN", Const, 1, ""}, + {"ETHER_CRC_POLY_BE", Const, 1, ""}, + {"ETHER_CRC_POLY_LE", Const, 1, ""}, + {"ETHER_HDR_LEN", Const, 1, ""}, + {"ETHER_MAX_DIX_LEN", Const, 1, ""}, + {"ETHER_MAX_LEN", Const, 1, ""}, + {"ETHER_MAX_LEN_JUMBO", Const, 1, ""}, + {"ETHER_MIN_LEN", Const, 1, ""}, + {"ETHER_PPPOE_ENCAP_LEN", Const, 1, ""}, + {"ETHER_TYPE_LEN", Const, 1, ""}, + {"ETHER_VLAN_ENCAP_LEN", Const, 1, ""}, + {"ETH_P_1588", Const, 0, ""}, + {"ETH_P_8021Q", Const, 0, ""}, + {"ETH_P_802_2", Const, 0, ""}, + {"ETH_P_802_3", Const, 0, ""}, + {"ETH_P_AARP", Const, 0, ""}, + {"ETH_P_ALL", Const, 0, ""}, + {"ETH_P_AOE", Const, 0, ""}, + {"ETH_P_ARCNET", Const, 0, ""}, + {"ETH_P_ARP", Const, 0, ""}, + {"ETH_P_ATALK", Const, 0, ""}, + {"ETH_P_ATMFATE", Const, 0, ""}, + {"ETH_P_ATMMPOA", Const, 0, ""}, + {"ETH_P_AX25", Const, 0, ""}, + {"ETH_P_BPQ", Const, 0, ""}, + {"ETH_P_CAIF", Const, 0, ""}, + {"ETH_P_CAN", Const, 0, ""}, + {"ETH_P_CONTROL", Const, 0, ""}, + {"ETH_P_CUST", Const, 0, ""}, + {"ETH_P_DDCMP", Const, 0, ""}, + {"ETH_P_DEC", Const, 0, ""}, + {"ETH_P_DIAG", Const, 0, ""}, + {"ETH_P_DNA_DL", Const, 0, ""}, + {"ETH_P_DNA_RC", Const, 0, ""}, + {"ETH_P_DNA_RT", Const, 0, ""}, + {"ETH_P_DSA", Const, 0, ""}, + {"ETH_P_ECONET", Const, 0, ""}, + {"ETH_P_EDSA", Const, 0, ""}, + {"ETH_P_FCOE", Const, 0, ""}, + {"ETH_P_FIP", Const, 0, ""}, + {"ETH_P_HDLC", Const, 0, ""}, + {"ETH_P_IEEE802154", Const, 0, ""}, + {"ETH_P_IEEEPUP", Const, 0, ""}, + {"ETH_P_IEEEPUPAT", Const, 0, ""}, + {"ETH_P_IP", Const, 0, ""}, + {"ETH_P_IPV6", Const, 0, ""}, + {"ETH_P_IPX", Const, 0, ""}, + {"ETH_P_IRDA", Const, 0, ""}, + {"ETH_P_LAT", Const, 0, ""}, + {"ETH_P_LINK_CTL", Const, 0, ""}, + {"ETH_P_LOCALTALK", Const, 0, ""}, + {"ETH_P_LOOP", Const, 0, ""}, + {"ETH_P_MOBITEX", Const, 0, ""}, + {"ETH_P_MPLS_MC", Const, 0, ""}, + {"ETH_P_MPLS_UC", Const, 0, ""}, + {"ETH_P_PAE", Const, 0, ""}, + {"ETH_P_PAUSE", Const, 0, ""}, + {"ETH_P_PHONET", Const, 0, ""}, + {"ETH_P_PPPTALK", Const, 0, ""}, + {"ETH_P_PPP_DISC", Const, 0, ""}, + {"ETH_P_PPP_MP", Const, 0, ""}, + {"ETH_P_PPP_SES", Const, 0, ""}, + {"ETH_P_PUP", Const, 0, ""}, + {"ETH_P_PUPAT", Const, 0, ""}, + {"ETH_P_RARP", Const, 0, ""}, + {"ETH_P_SCA", Const, 0, ""}, + {"ETH_P_SLOW", Const, 0, ""}, + {"ETH_P_SNAP", Const, 0, ""}, + {"ETH_P_TEB", Const, 0, ""}, + {"ETH_P_TIPC", Const, 0, ""}, + {"ETH_P_TRAILER", Const, 0, ""}, + {"ETH_P_TR_802_2", Const, 0, ""}, + {"ETH_P_WAN_PPP", Const, 0, ""}, + {"ETH_P_WCCP", Const, 0, ""}, + {"ETH_P_X25", Const, 0, ""}, + {"ETIME", Const, 0, ""}, + {"ETIMEDOUT", Const, 0, ""}, + {"ETOOMANYREFS", Const, 0, ""}, + {"ETXTBSY", Const, 0, ""}, + {"EUCLEAN", Const, 0, ""}, + {"EUNATCH", Const, 0, ""}, + {"EUSERS", Const, 0, ""}, + {"EVFILT_AIO", Const, 0, ""}, + {"EVFILT_FS", Const, 0, ""}, + {"EVFILT_LIO", Const, 0, ""}, + {"EVFILT_MACHPORT", Const, 0, ""}, + {"EVFILT_PROC", Const, 0, ""}, + {"EVFILT_READ", Const, 0, ""}, + {"EVFILT_SIGNAL", Const, 0, ""}, + {"EVFILT_SYSCOUNT", Const, 0, ""}, + {"EVFILT_THREADMARKER", Const, 0, ""}, + {"EVFILT_TIMER", Const, 0, ""}, + {"EVFILT_USER", Const, 0, ""}, + {"EVFILT_VM", Const, 0, ""}, + {"EVFILT_VNODE", Const, 0, ""}, + {"EVFILT_WRITE", Const, 0, ""}, + {"EV_ADD", Const, 0, ""}, + {"EV_CLEAR", Const, 0, ""}, + {"EV_DELETE", Const, 0, ""}, + {"EV_DISABLE", Const, 0, ""}, + {"EV_DISPATCH", Const, 0, ""}, + {"EV_DROP", Const, 3, ""}, + {"EV_ENABLE", Const, 0, ""}, + {"EV_EOF", Const, 0, ""}, + {"EV_ERROR", Const, 0, ""}, + {"EV_FLAG0", Const, 0, ""}, + {"EV_FLAG1", Const, 0, ""}, + 
{"EV_ONESHOT", Const, 0, ""}, + {"EV_OOBAND", Const, 0, ""}, + {"EV_POLL", Const, 0, ""}, + {"EV_RECEIPT", Const, 0, ""}, + {"EV_SYSFLAGS", Const, 0, ""}, + {"EWINDOWS", Const, 0, ""}, + {"EWOULDBLOCK", Const, 0, ""}, + {"EXDEV", Const, 0, ""}, + {"EXFULL", Const, 0, ""}, + {"EXTA", Const, 0, ""}, + {"EXTB", Const, 0, ""}, + {"EXTPROC", Const, 0, ""}, + {"Environ", Func, 0, "func() []string"}, + {"EpollCreate", Func, 0, "func(size int) (fd int, err error)"}, + {"EpollCreate1", Func, 0, "func(flag int) (fd int, err error)"}, + {"EpollCtl", Func, 0, "func(epfd int, op int, fd int, event *EpollEvent) (err error)"}, + {"EpollEvent", Type, 0, ""}, + {"EpollEvent.Events", Field, 0, ""}, + {"EpollEvent.Fd", Field, 0, ""}, + {"EpollEvent.Pad", Field, 0, ""}, + {"EpollEvent.PadFd", Field, 0, ""}, + {"EpollWait", Func, 0, "func(epfd int, events []EpollEvent, msec int) (n int, err error)"}, + {"Errno", Type, 0, ""}, + {"EscapeArg", Func, 0, ""}, + {"Exchangedata", Func, 0, ""}, + {"Exec", Func, 0, "func(argv0 string, argv []string, envv []string) (err error)"}, + {"Exit", Func, 0, "func(code int)"}, + {"ExitProcess", Func, 0, ""}, + {"FD_CLOEXEC", Const, 0, ""}, + {"FD_SETSIZE", Const, 0, ""}, + {"FILE_ACTION_ADDED", Const, 0, ""}, + {"FILE_ACTION_MODIFIED", Const, 0, ""}, + {"FILE_ACTION_REMOVED", Const, 0, ""}, + {"FILE_ACTION_RENAMED_NEW_NAME", Const, 0, ""}, + {"FILE_ACTION_RENAMED_OLD_NAME", Const, 0, ""}, + {"FILE_APPEND_DATA", Const, 0, ""}, + {"FILE_ATTRIBUTE_ARCHIVE", Const, 0, ""}, + {"FILE_ATTRIBUTE_DIRECTORY", Const, 0, ""}, + {"FILE_ATTRIBUTE_HIDDEN", Const, 0, ""}, + {"FILE_ATTRIBUTE_NORMAL", Const, 0, ""}, + {"FILE_ATTRIBUTE_READONLY", Const, 0, ""}, + {"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4, ""}, + {"FILE_ATTRIBUTE_SYSTEM", Const, 0, ""}, + {"FILE_BEGIN", Const, 0, ""}, + {"FILE_CURRENT", Const, 0, ""}, + {"FILE_END", Const, 0, ""}, + {"FILE_FLAG_BACKUP_SEMANTICS", Const, 0, ""}, + {"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4, ""}, + {"FILE_FLAG_OVERLAPPED", Const, 0, ""}, + {"FILE_LIST_DIRECTORY", Const, 0, ""}, + {"FILE_MAP_COPY", Const, 0, ""}, + {"FILE_MAP_EXECUTE", Const, 0, ""}, + {"FILE_MAP_READ", Const, 0, ""}, + {"FILE_MAP_WRITE", Const, 0, ""}, + {"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0, ""}, + {"FILE_NOTIFY_CHANGE_CREATION", Const, 0, ""}, + {"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0, ""}, + {"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0, ""}, + {"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0, ""}, + {"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0, ""}, + {"FILE_NOTIFY_CHANGE_SIZE", Const, 0, ""}, + {"FILE_SHARE_DELETE", Const, 0, ""}, + {"FILE_SHARE_READ", Const, 0, ""}, + {"FILE_SHARE_WRITE", Const, 0, ""}, + {"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2, ""}, + {"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2, ""}, + {"FILE_TYPE_CHAR", Const, 0, ""}, + {"FILE_TYPE_DISK", Const, 0, ""}, + {"FILE_TYPE_PIPE", Const, 0, ""}, + {"FILE_TYPE_REMOTE", Const, 0, ""}, + {"FILE_TYPE_UNKNOWN", Const, 0, ""}, + {"FILE_WRITE_ATTRIBUTES", Const, 0, ""}, + {"FLUSHO", Const, 0, ""}, + {"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0, ""}, + {"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0, ""}, + {"FORMAT_MESSAGE_FROM_HMODULE", Const, 0, ""}, + {"FORMAT_MESSAGE_FROM_STRING", Const, 0, ""}, + {"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0, ""}, + {"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0, ""}, + {"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0, ""}, + {"FSCTL_GET_REPARSE_POINT", Const, 4, ""}, + {"F_ADDFILESIGS", Const, 0, ""}, + {"F_ADDSIGS", Const, 0, ""}, + {"F_ALLOCATEALL", Const, 0, ""}, + {"F_ALLOCATECONTIG", Const, 0, 
""}, + {"F_CANCEL", Const, 0, ""}, + {"F_CHKCLEAN", Const, 0, ""}, + {"F_CLOSEM", Const, 1, ""}, + {"F_DUP2FD", Const, 0, ""}, + {"F_DUP2FD_CLOEXEC", Const, 1, ""}, + {"F_DUPFD", Const, 0, ""}, + {"F_DUPFD_CLOEXEC", Const, 0, ""}, + {"F_EXLCK", Const, 0, ""}, + {"F_FINDSIGS", Const, 16, ""}, + {"F_FLUSH_DATA", Const, 0, ""}, + {"F_FREEZE_FS", Const, 0, ""}, + {"F_FSCTL", Const, 1, ""}, + {"F_FSDIRMASK", Const, 1, ""}, + {"F_FSIN", Const, 1, ""}, + {"F_FSINOUT", Const, 1, ""}, + {"F_FSOUT", Const, 1, ""}, + {"F_FSPRIV", Const, 1, ""}, + {"F_FSVOID", Const, 1, ""}, + {"F_FULLFSYNC", Const, 0, ""}, + {"F_GETCODEDIR", Const, 16, ""}, + {"F_GETFD", Const, 0, ""}, + {"F_GETFL", Const, 0, ""}, + {"F_GETLEASE", Const, 0, ""}, + {"F_GETLK", Const, 0, ""}, + {"F_GETLK64", Const, 0, ""}, + {"F_GETLKPID", Const, 0, ""}, + {"F_GETNOSIGPIPE", Const, 0, ""}, + {"F_GETOWN", Const, 0, ""}, + {"F_GETOWN_EX", Const, 0, ""}, + {"F_GETPATH", Const, 0, ""}, + {"F_GETPATH_MTMINFO", Const, 0, ""}, + {"F_GETPIPE_SZ", Const, 0, ""}, + {"F_GETPROTECTIONCLASS", Const, 0, ""}, + {"F_GETPROTECTIONLEVEL", Const, 16, ""}, + {"F_GETSIG", Const, 0, ""}, + {"F_GLOBAL_NOCACHE", Const, 0, ""}, + {"F_LOCK", Const, 0, ""}, + {"F_LOG2PHYS", Const, 0, ""}, + {"F_LOG2PHYS_EXT", Const, 0, ""}, + {"F_MARKDEPENDENCY", Const, 0, ""}, + {"F_MAXFD", Const, 1, ""}, + {"F_NOCACHE", Const, 0, ""}, + {"F_NODIRECT", Const, 0, ""}, + {"F_NOTIFY", Const, 0, ""}, + {"F_OGETLK", Const, 0, ""}, + {"F_OK", Const, 0, ""}, + {"F_OSETLK", Const, 0, ""}, + {"F_OSETLKW", Const, 0, ""}, + {"F_PARAM_MASK", Const, 1, ""}, + {"F_PARAM_MAX", Const, 1, ""}, + {"F_PATHPKG_CHECK", Const, 0, ""}, + {"F_PEOFPOSMODE", Const, 0, ""}, + {"F_PREALLOCATE", Const, 0, ""}, + {"F_RDADVISE", Const, 0, ""}, + {"F_RDAHEAD", Const, 0, ""}, + {"F_RDLCK", Const, 0, ""}, + {"F_READAHEAD", Const, 0, ""}, + {"F_READBOOTSTRAP", Const, 0, ""}, + {"F_SETBACKINGSTORE", Const, 0, ""}, + {"F_SETFD", Const, 0, ""}, + {"F_SETFL", Const, 0, ""}, + {"F_SETLEASE", Const, 0, ""}, + {"F_SETLK", Const, 0, ""}, + {"F_SETLK64", Const, 0, ""}, + {"F_SETLKW", Const, 0, ""}, + {"F_SETLKW64", Const, 0, ""}, + {"F_SETLKWTIMEOUT", Const, 16, ""}, + {"F_SETLK_REMOTE", Const, 0, ""}, + {"F_SETNOSIGPIPE", Const, 0, ""}, + {"F_SETOWN", Const, 0, ""}, + {"F_SETOWN_EX", Const, 0, ""}, + {"F_SETPIPE_SZ", Const, 0, ""}, + {"F_SETPROTECTIONCLASS", Const, 0, ""}, + {"F_SETSIG", Const, 0, ""}, + {"F_SETSIZE", Const, 0, ""}, + {"F_SHLCK", Const, 0, ""}, + {"F_SINGLE_WRITER", Const, 16, ""}, + {"F_TEST", Const, 0, ""}, + {"F_THAW_FS", Const, 0, ""}, + {"F_TLOCK", Const, 0, ""}, + {"F_TRANSCODEKEY", Const, 16, ""}, + {"F_ULOCK", Const, 0, ""}, + {"F_UNLCK", Const, 0, ""}, + {"F_UNLCKSYS", Const, 0, ""}, + {"F_VOLPOSMODE", Const, 0, ""}, + {"F_WRITEBOOTSTRAP", Const, 0, ""}, + {"F_WRLCK", Const, 0, ""}, + {"Faccessat", Func, 0, "func(dirfd int, path string, mode uint32, flags int) (err error)"}, + {"Fallocate", Func, 0, "func(fd int, mode uint32, off int64, len int64) (err error)"}, + {"Fbootstraptransfer_t", Type, 0, ""}, + {"Fbootstraptransfer_t.Buffer", Field, 0, ""}, + {"Fbootstraptransfer_t.Length", Field, 0, ""}, + {"Fbootstraptransfer_t.Offset", Field, 0, ""}, + {"Fchdir", Func, 0, "func(fd int) (err error)"}, + {"Fchflags", Func, 0, ""}, + {"Fchmod", Func, 0, "func(fd int, mode uint32) (err error)"}, + {"Fchmodat", Func, 0, "func(dirfd int, path string, mode uint32, flags int) error"}, + {"Fchown", Func, 0, "func(fd int, uid int, gid int) (err error)"}, + {"Fchownat", Func, 0, "func(dirfd int, path string, 
uid int, gid int, flags int) (err error)"}, + {"FcntlFlock", Func, 3, "func(fd uintptr, cmd int, lk *Flock_t) error"}, + {"FdSet", Type, 0, ""}, + {"FdSet.Bits", Field, 0, ""}, + {"FdSet.X__fds_bits", Field, 0, ""}, + {"Fdatasync", Func, 0, "func(fd int) (err error)"}, + {"FileNotifyInformation", Type, 0, ""}, + {"FileNotifyInformation.Action", Field, 0, ""}, + {"FileNotifyInformation.FileName", Field, 0, ""}, + {"FileNotifyInformation.FileNameLength", Field, 0, ""}, + {"FileNotifyInformation.NextEntryOffset", Field, 0, ""}, + {"Filetime", Type, 0, ""}, + {"Filetime.HighDateTime", Field, 0, ""}, + {"Filetime.LowDateTime", Field, 0, ""}, + {"FindClose", Func, 0, ""}, + {"FindFirstFile", Func, 0, ""}, + {"FindNextFile", Func, 0, ""}, + {"Flock", Func, 0, "func(fd int, how int) (err error)"}, + {"Flock_t", Type, 0, ""}, + {"Flock_t.Len", Field, 0, ""}, + {"Flock_t.Pad_cgo_0", Field, 0, ""}, + {"Flock_t.Pad_cgo_1", Field, 3, ""}, + {"Flock_t.Pid", Field, 0, ""}, + {"Flock_t.Start", Field, 0, ""}, + {"Flock_t.Sysid", Field, 0, ""}, + {"Flock_t.Type", Field, 0, ""}, + {"Flock_t.Whence", Field, 0, ""}, + {"FlushBpf", Func, 0, ""}, + {"FlushFileBuffers", Func, 0, ""}, + {"FlushViewOfFile", Func, 0, ""}, + {"ForkExec", Func, 0, "func(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)"}, + {"ForkLock", Var, 0, ""}, + {"FormatMessage", Func, 0, ""}, + {"Fpathconf", Func, 0, ""}, + {"FreeAddrInfoW", Func, 1, ""}, + {"FreeEnvironmentStrings", Func, 0, ""}, + {"FreeLibrary", Func, 0, ""}, + {"Fsid", Type, 0, ""}, + {"Fsid.Val", Field, 0, ""}, + {"Fsid.X__fsid_val", Field, 2, ""}, + {"Fsid.X__val", Field, 0, ""}, + {"Fstat", Func, 0, "func(fd int, stat *Stat_t) (err error)"}, + {"Fstatat", Func, 12, ""}, + {"Fstatfs", Func, 0, "func(fd int, buf *Statfs_t) (err error)"}, + {"Fstore_t", Type, 0, ""}, + {"Fstore_t.Bytesalloc", Field, 0, ""}, + {"Fstore_t.Flags", Field, 0, ""}, + {"Fstore_t.Length", Field, 0, ""}, + {"Fstore_t.Offset", Field, 0, ""}, + {"Fstore_t.Posmode", Field, 0, ""}, + {"Fsync", Func, 0, "func(fd int) (err error)"}, + {"Ftruncate", Func, 0, "func(fd int, length int64) (err error)"}, + {"FullPath", Func, 4, ""}, + {"Futimes", Func, 0, "func(fd int, tv []Timeval) (err error)"}, + {"Futimesat", Func, 0, "func(dirfd int, path string, tv []Timeval) (err error)"}, + {"GENERIC_ALL", Const, 0, ""}, + {"GENERIC_EXECUTE", Const, 0, ""}, + {"GENERIC_READ", Const, 0, ""}, + {"GENERIC_WRITE", Const, 0, ""}, + {"GUID", Type, 1, ""}, + {"GUID.Data1", Field, 1, ""}, + {"GUID.Data2", Field, 1, ""}, + {"GUID.Data3", Field, 1, ""}, + {"GUID.Data4", Field, 1, ""}, + {"GetAcceptExSockaddrs", Func, 0, ""}, + {"GetAdaptersInfo", Func, 0, ""}, + {"GetAddrInfoW", Func, 1, ""}, + {"GetCommandLine", Func, 0, ""}, + {"GetComputerName", Func, 0, ""}, + {"GetConsoleMode", Func, 1, ""}, + {"GetCurrentDirectory", Func, 0, ""}, + {"GetCurrentProcess", Func, 0, ""}, + {"GetEnvironmentStrings", Func, 0, ""}, + {"GetEnvironmentVariable", Func, 0, ""}, + {"GetExitCodeProcess", Func, 0, ""}, + {"GetFileAttributes", Func, 0, ""}, + {"GetFileAttributesEx", Func, 0, ""}, + {"GetFileExInfoStandard", Const, 0, ""}, + {"GetFileExMaxInfoLevel", Const, 0, ""}, + {"GetFileInformationByHandle", Func, 0, ""}, + {"GetFileType", Func, 0, ""}, + {"GetFullPathName", Func, 0, ""}, + {"GetHostByName", Func, 0, ""}, + {"GetIfEntry", Func, 0, ""}, + {"GetLastError", Func, 0, ""}, + {"GetLengthSid", Func, 0, ""}, + {"GetLongPathName", Func, 0, ""}, + {"GetProcAddress", Func, 0, ""}, + {"GetProcessTimes", Func, 0, ""}, + 
{"GetProtoByName", Func, 0, ""}, + {"GetQueuedCompletionStatus", Func, 0, ""}, + {"GetServByName", Func, 0, ""}, + {"GetShortPathName", Func, 0, ""}, + {"GetStartupInfo", Func, 0, ""}, + {"GetStdHandle", Func, 0, ""}, + {"GetSystemTimeAsFileTime", Func, 0, ""}, + {"GetTempPath", Func, 0, ""}, + {"GetTimeZoneInformation", Func, 0, ""}, + {"GetTokenInformation", Func, 0, ""}, + {"GetUserNameEx", Func, 0, ""}, + {"GetUserProfileDirectory", Func, 0, ""}, + {"GetVersion", Func, 0, ""}, + {"Getcwd", Func, 0, "func(buf []byte) (n int, err error)"}, + {"Getdents", Func, 0, "func(fd int, buf []byte) (n int, err error)"}, + {"Getdirentries", Func, 0, ""}, + {"Getdtablesize", Func, 0, ""}, + {"Getegid", Func, 0, "func() (egid int)"}, + {"Getenv", Func, 0, "func(key string) (value string, found bool)"}, + {"Geteuid", Func, 0, "func() (euid int)"}, + {"Getfsstat", Func, 0, ""}, + {"Getgid", Func, 0, "func() (gid int)"}, + {"Getgroups", Func, 0, "func() (gids []int, err error)"}, + {"Getpagesize", Func, 0, "func() int"}, + {"Getpeername", Func, 0, "func(fd int) (sa Sockaddr, err error)"}, + {"Getpgid", Func, 0, "func(pid int) (pgid int, err error)"}, + {"Getpgrp", Func, 0, "func() (pid int)"}, + {"Getpid", Func, 0, "func() (pid int)"}, + {"Getppid", Func, 0, "func() (ppid int)"}, + {"Getpriority", Func, 0, "func(which int, who int) (prio int, err error)"}, + {"Getrlimit", Func, 0, "func(resource int, rlim *Rlimit) (err error)"}, + {"Getrusage", Func, 0, "func(who int, rusage *Rusage) (err error)"}, + {"Getsid", Func, 0, ""}, + {"Getsockname", Func, 0, "func(fd int) (sa Sockaddr, err error)"}, + {"Getsockopt", Func, 1, ""}, + {"GetsockoptByte", Func, 0, ""}, + {"GetsockoptICMPv6Filter", Func, 2, "func(fd int, level int, opt int) (*ICMPv6Filter, error)"}, + {"GetsockoptIPMreq", Func, 0, "func(fd int, level int, opt int) (*IPMreq, error)"}, + {"GetsockoptIPMreqn", Func, 0, "func(fd int, level int, opt int) (*IPMreqn, error)"}, + {"GetsockoptIPv6MTUInfo", Func, 2, "func(fd int, level int, opt int) (*IPv6MTUInfo, error)"}, + {"GetsockoptIPv6Mreq", Func, 0, "func(fd int, level int, opt int) (*IPv6Mreq, error)"}, + {"GetsockoptInet4Addr", Func, 0, "func(fd int, level int, opt int) (value [4]byte, err error)"}, + {"GetsockoptInt", Func, 0, "func(fd int, level int, opt int) (value int, err error)"}, + {"GetsockoptUcred", Func, 1, "func(fd int, level int, opt int) (*Ucred, error)"}, + {"Gettid", Func, 0, "func() (tid int)"}, + {"Gettimeofday", Func, 0, "func(tv *Timeval) (err error)"}, + {"Getuid", Func, 0, "func() (uid int)"}, + {"Getwd", Func, 0, "func() (wd string, err error)"}, + {"Getxattr", Func, 1, "func(path string, attr string, dest []byte) (sz int, err error)"}, + {"HANDLE_FLAG_INHERIT", Const, 0, ""}, + {"HKEY_CLASSES_ROOT", Const, 0, ""}, + {"HKEY_CURRENT_CONFIG", Const, 0, ""}, + {"HKEY_CURRENT_USER", Const, 0, ""}, + {"HKEY_DYN_DATA", Const, 0, ""}, + {"HKEY_LOCAL_MACHINE", Const, 0, ""}, + {"HKEY_PERFORMANCE_DATA", Const, 0, ""}, + {"HKEY_USERS", Const, 0, ""}, + {"HUPCL", Const, 0, ""}, + {"Handle", Type, 0, ""}, + {"Hostent", Type, 0, ""}, + {"Hostent.AddrList", Field, 0, ""}, + {"Hostent.AddrType", Field, 0, ""}, + {"Hostent.Aliases", Field, 0, ""}, + {"Hostent.Length", Field, 0, ""}, + {"Hostent.Name", Field, 0, ""}, + {"ICANON", Const, 0, ""}, + {"ICMP6_FILTER", Const, 2, ""}, + {"ICMPV6_FILTER", Const, 2, ""}, + {"ICMPv6Filter", Type, 2, ""}, + {"ICMPv6Filter.Data", Field, 2, ""}, + {"ICMPv6Filter.Filt", Field, 2, ""}, + {"ICRNL", Const, 0, ""}, + {"IEXTEN", Const, 0, ""}, + {"IFAN_ARRIVAL", 
Const, 1, ""}, + {"IFAN_DEPARTURE", Const, 1, ""}, + {"IFA_ADDRESS", Const, 0, ""}, + {"IFA_ANYCAST", Const, 0, ""}, + {"IFA_BROADCAST", Const, 0, ""}, + {"IFA_CACHEINFO", Const, 0, ""}, + {"IFA_F_DADFAILED", Const, 0, ""}, + {"IFA_F_DEPRECATED", Const, 0, ""}, + {"IFA_F_HOMEADDRESS", Const, 0, ""}, + {"IFA_F_NODAD", Const, 0, ""}, + {"IFA_F_OPTIMISTIC", Const, 0, ""}, + {"IFA_F_PERMANENT", Const, 0, ""}, + {"IFA_F_SECONDARY", Const, 0, ""}, + {"IFA_F_TEMPORARY", Const, 0, ""}, + {"IFA_F_TENTATIVE", Const, 0, ""}, + {"IFA_LABEL", Const, 0, ""}, + {"IFA_LOCAL", Const, 0, ""}, + {"IFA_MAX", Const, 0, ""}, + {"IFA_MULTICAST", Const, 0, ""}, + {"IFA_ROUTE", Const, 1, ""}, + {"IFA_UNSPEC", Const, 0, ""}, + {"IFF_ALLMULTI", Const, 0, ""}, + {"IFF_ALTPHYS", Const, 0, ""}, + {"IFF_AUTOMEDIA", Const, 0, ""}, + {"IFF_BROADCAST", Const, 0, ""}, + {"IFF_CANTCHANGE", Const, 0, ""}, + {"IFF_CANTCONFIG", Const, 1, ""}, + {"IFF_DEBUG", Const, 0, ""}, + {"IFF_DRV_OACTIVE", Const, 0, ""}, + {"IFF_DRV_RUNNING", Const, 0, ""}, + {"IFF_DYING", Const, 0, ""}, + {"IFF_DYNAMIC", Const, 0, ""}, + {"IFF_LINK0", Const, 0, ""}, + {"IFF_LINK1", Const, 0, ""}, + {"IFF_LINK2", Const, 0, ""}, + {"IFF_LOOPBACK", Const, 0, ""}, + {"IFF_MASTER", Const, 0, ""}, + {"IFF_MONITOR", Const, 0, ""}, + {"IFF_MULTICAST", Const, 0, ""}, + {"IFF_NOARP", Const, 0, ""}, + {"IFF_NOTRAILERS", Const, 0, ""}, + {"IFF_NO_PI", Const, 0, ""}, + {"IFF_OACTIVE", Const, 0, ""}, + {"IFF_ONE_QUEUE", Const, 0, ""}, + {"IFF_POINTOPOINT", Const, 0, ""}, + {"IFF_POINTTOPOINT", Const, 0, ""}, + {"IFF_PORTSEL", Const, 0, ""}, + {"IFF_PPROMISC", Const, 0, ""}, + {"IFF_PROMISC", Const, 0, ""}, + {"IFF_RENAMING", Const, 0, ""}, + {"IFF_RUNNING", Const, 0, ""}, + {"IFF_SIMPLEX", Const, 0, ""}, + {"IFF_SLAVE", Const, 0, ""}, + {"IFF_SMART", Const, 0, ""}, + {"IFF_STATICARP", Const, 0, ""}, + {"IFF_TAP", Const, 0, ""}, + {"IFF_TUN", Const, 0, ""}, + {"IFF_TUN_EXCL", Const, 0, ""}, + {"IFF_UP", Const, 0, ""}, + {"IFF_VNET_HDR", Const, 0, ""}, + {"IFLA_ADDRESS", Const, 0, ""}, + {"IFLA_BROADCAST", Const, 0, ""}, + {"IFLA_COST", Const, 0, ""}, + {"IFLA_IFALIAS", Const, 0, ""}, + {"IFLA_IFNAME", Const, 0, ""}, + {"IFLA_LINK", Const, 0, ""}, + {"IFLA_LINKINFO", Const, 0, ""}, + {"IFLA_LINKMODE", Const, 0, ""}, + {"IFLA_MAP", Const, 0, ""}, + {"IFLA_MASTER", Const, 0, ""}, + {"IFLA_MAX", Const, 0, ""}, + {"IFLA_MTU", Const, 0, ""}, + {"IFLA_NET_NS_PID", Const, 0, ""}, + {"IFLA_OPERSTATE", Const, 0, ""}, + {"IFLA_PRIORITY", Const, 0, ""}, + {"IFLA_PROTINFO", Const, 0, ""}, + {"IFLA_QDISC", Const, 0, ""}, + {"IFLA_STATS", Const, 0, ""}, + {"IFLA_TXQLEN", Const, 0, ""}, + {"IFLA_UNSPEC", Const, 0, ""}, + {"IFLA_WEIGHT", Const, 0, ""}, + {"IFLA_WIRELESS", Const, 0, ""}, + {"IFNAMSIZ", Const, 0, ""}, + {"IFT_1822", Const, 0, ""}, + {"IFT_A12MPPSWITCH", Const, 0, ""}, + {"IFT_AAL2", Const, 0, ""}, + {"IFT_AAL5", Const, 0, ""}, + {"IFT_ADSL", Const, 0, ""}, + {"IFT_AFLANE8023", Const, 0, ""}, + {"IFT_AFLANE8025", Const, 0, ""}, + {"IFT_ARAP", Const, 0, ""}, + {"IFT_ARCNET", Const, 0, ""}, + {"IFT_ARCNETPLUS", Const, 0, ""}, + {"IFT_ASYNC", Const, 0, ""}, + {"IFT_ATM", Const, 0, ""}, + {"IFT_ATMDXI", Const, 0, ""}, + {"IFT_ATMFUNI", Const, 0, ""}, + {"IFT_ATMIMA", Const, 0, ""}, + {"IFT_ATMLOGICAL", Const, 0, ""}, + {"IFT_ATMRADIO", Const, 0, ""}, + {"IFT_ATMSUBINTERFACE", Const, 0, ""}, + {"IFT_ATMVCIENDPT", Const, 0, ""}, + {"IFT_ATMVIRTUAL", Const, 0, ""}, + {"IFT_BGPPOLICYACCOUNTING", Const, 0, ""}, + {"IFT_BLUETOOTH", Const, 1, ""}, + {"IFT_BRIDGE", Const, 0, ""}, + 
{"IFT_BSC", Const, 0, ""}, + {"IFT_CARP", Const, 0, ""}, + {"IFT_CCTEMUL", Const, 0, ""}, + {"IFT_CELLULAR", Const, 0, ""}, + {"IFT_CEPT", Const, 0, ""}, + {"IFT_CES", Const, 0, ""}, + {"IFT_CHANNEL", Const, 0, ""}, + {"IFT_CNR", Const, 0, ""}, + {"IFT_COFFEE", Const, 0, ""}, + {"IFT_COMPOSITELINK", Const, 0, ""}, + {"IFT_DCN", Const, 0, ""}, + {"IFT_DIGITALPOWERLINE", Const, 0, ""}, + {"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0, ""}, + {"IFT_DLSW", Const, 0, ""}, + {"IFT_DOCSCABLEDOWNSTREAM", Const, 0, ""}, + {"IFT_DOCSCABLEMACLAYER", Const, 0, ""}, + {"IFT_DOCSCABLEUPSTREAM", Const, 0, ""}, + {"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1, ""}, + {"IFT_DS0", Const, 0, ""}, + {"IFT_DS0BUNDLE", Const, 0, ""}, + {"IFT_DS1FDL", Const, 0, ""}, + {"IFT_DS3", Const, 0, ""}, + {"IFT_DTM", Const, 0, ""}, + {"IFT_DUMMY", Const, 1, ""}, + {"IFT_DVBASILN", Const, 0, ""}, + {"IFT_DVBASIOUT", Const, 0, ""}, + {"IFT_DVBRCCDOWNSTREAM", Const, 0, ""}, + {"IFT_DVBRCCMACLAYER", Const, 0, ""}, + {"IFT_DVBRCCUPSTREAM", Const, 0, ""}, + {"IFT_ECONET", Const, 1, ""}, + {"IFT_ENC", Const, 0, ""}, + {"IFT_EON", Const, 0, ""}, + {"IFT_EPLRS", Const, 0, ""}, + {"IFT_ESCON", Const, 0, ""}, + {"IFT_ETHER", Const, 0, ""}, + {"IFT_FAITH", Const, 0, ""}, + {"IFT_FAST", Const, 0, ""}, + {"IFT_FASTETHER", Const, 0, ""}, + {"IFT_FASTETHERFX", Const, 0, ""}, + {"IFT_FDDI", Const, 0, ""}, + {"IFT_FIBRECHANNEL", Const, 0, ""}, + {"IFT_FRAMERELAYINTERCONNECT", Const, 0, ""}, + {"IFT_FRAMERELAYMPI", Const, 0, ""}, + {"IFT_FRDLCIENDPT", Const, 0, ""}, + {"IFT_FRELAY", Const, 0, ""}, + {"IFT_FRELAYDCE", Const, 0, ""}, + {"IFT_FRF16MFRBUNDLE", Const, 0, ""}, + {"IFT_FRFORWARD", Const, 0, ""}, + {"IFT_G703AT2MB", Const, 0, ""}, + {"IFT_G703AT64K", Const, 0, ""}, + {"IFT_GIF", Const, 0, ""}, + {"IFT_GIGABITETHERNET", Const, 0, ""}, + {"IFT_GR303IDT", Const, 0, ""}, + {"IFT_GR303RDT", Const, 0, ""}, + {"IFT_H323GATEKEEPER", Const, 0, ""}, + {"IFT_H323PROXY", Const, 0, ""}, + {"IFT_HDH1822", Const, 0, ""}, + {"IFT_HDLC", Const, 0, ""}, + {"IFT_HDSL2", Const, 0, ""}, + {"IFT_HIPERLAN2", Const, 0, ""}, + {"IFT_HIPPI", Const, 0, ""}, + {"IFT_HIPPIINTERFACE", Const, 0, ""}, + {"IFT_HOSTPAD", Const, 0, ""}, + {"IFT_HSSI", Const, 0, ""}, + {"IFT_HY", Const, 0, ""}, + {"IFT_IBM370PARCHAN", Const, 0, ""}, + {"IFT_IDSL", Const, 0, ""}, + {"IFT_IEEE1394", Const, 0, ""}, + {"IFT_IEEE80211", Const, 0, ""}, + {"IFT_IEEE80212", Const, 0, ""}, + {"IFT_IEEE8023ADLAG", Const, 0, ""}, + {"IFT_IFGSN", Const, 0, ""}, + {"IFT_IMT", Const, 0, ""}, + {"IFT_INFINIBAND", Const, 1, ""}, + {"IFT_INTERLEAVE", Const, 0, ""}, + {"IFT_IP", Const, 0, ""}, + {"IFT_IPFORWARD", Const, 0, ""}, + {"IFT_IPOVERATM", Const, 0, ""}, + {"IFT_IPOVERCDLC", Const, 0, ""}, + {"IFT_IPOVERCLAW", Const, 0, ""}, + {"IFT_IPSWITCH", Const, 0, ""}, + {"IFT_IPXIP", Const, 0, ""}, + {"IFT_ISDN", Const, 0, ""}, + {"IFT_ISDNBASIC", Const, 0, ""}, + {"IFT_ISDNPRIMARY", Const, 0, ""}, + {"IFT_ISDNS", Const, 0, ""}, + {"IFT_ISDNU", Const, 0, ""}, + {"IFT_ISO88022LLC", Const, 0, ""}, + {"IFT_ISO88023", Const, 0, ""}, + {"IFT_ISO88024", Const, 0, ""}, + {"IFT_ISO88025", Const, 0, ""}, + {"IFT_ISO88025CRFPINT", Const, 0, ""}, + {"IFT_ISO88025DTR", Const, 0, ""}, + {"IFT_ISO88025FIBER", Const, 0, ""}, + {"IFT_ISO88026", Const, 0, ""}, + {"IFT_ISUP", Const, 0, ""}, + {"IFT_L2VLAN", Const, 0, ""}, + {"IFT_L3IPVLAN", Const, 0, ""}, + {"IFT_L3IPXVLAN", Const, 0, ""}, + {"IFT_LAPB", Const, 0, ""}, + {"IFT_LAPD", Const, 0, ""}, + {"IFT_LAPF", Const, 0, ""}, + {"IFT_LINEGROUP", Const, 1, ""}, + 
{"IFT_LOCALTALK", Const, 0, ""}, + {"IFT_LOOP", Const, 0, ""}, + {"IFT_MEDIAMAILOVERIP", Const, 0, ""}, + {"IFT_MFSIGLINK", Const, 0, ""}, + {"IFT_MIOX25", Const, 0, ""}, + {"IFT_MODEM", Const, 0, ""}, + {"IFT_MPC", Const, 0, ""}, + {"IFT_MPLS", Const, 0, ""}, + {"IFT_MPLSTUNNEL", Const, 0, ""}, + {"IFT_MSDSL", Const, 0, ""}, + {"IFT_MVL", Const, 0, ""}, + {"IFT_MYRINET", Const, 0, ""}, + {"IFT_NFAS", Const, 0, ""}, + {"IFT_NSIP", Const, 0, ""}, + {"IFT_OPTICALCHANNEL", Const, 0, ""}, + {"IFT_OPTICALTRANSPORT", Const, 0, ""}, + {"IFT_OTHER", Const, 0, ""}, + {"IFT_P10", Const, 0, ""}, + {"IFT_P80", Const, 0, ""}, + {"IFT_PARA", Const, 0, ""}, + {"IFT_PDP", Const, 0, ""}, + {"IFT_PFLOG", Const, 0, ""}, + {"IFT_PFLOW", Const, 1, ""}, + {"IFT_PFSYNC", Const, 0, ""}, + {"IFT_PLC", Const, 0, ""}, + {"IFT_PON155", Const, 1, ""}, + {"IFT_PON622", Const, 1, ""}, + {"IFT_POS", Const, 0, ""}, + {"IFT_PPP", Const, 0, ""}, + {"IFT_PPPMULTILINKBUNDLE", Const, 0, ""}, + {"IFT_PROPATM", Const, 1, ""}, + {"IFT_PROPBWAP2MP", Const, 0, ""}, + {"IFT_PROPCNLS", Const, 0, ""}, + {"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0, ""}, + {"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0, ""}, + {"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0, ""}, + {"IFT_PROPMUX", Const, 0, ""}, + {"IFT_PROPVIRTUAL", Const, 0, ""}, + {"IFT_PROPWIRELESSP2P", Const, 0, ""}, + {"IFT_PTPSERIAL", Const, 0, ""}, + {"IFT_PVC", Const, 0, ""}, + {"IFT_Q2931", Const, 1, ""}, + {"IFT_QLLC", Const, 0, ""}, + {"IFT_RADIOMAC", Const, 0, ""}, + {"IFT_RADSL", Const, 0, ""}, + {"IFT_REACHDSL", Const, 0, ""}, + {"IFT_RFC1483", Const, 0, ""}, + {"IFT_RS232", Const, 0, ""}, + {"IFT_RSRB", Const, 0, ""}, + {"IFT_SDLC", Const, 0, ""}, + {"IFT_SDSL", Const, 0, ""}, + {"IFT_SHDSL", Const, 0, ""}, + {"IFT_SIP", Const, 0, ""}, + {"IFT_SIPSIG", Const, 1, ""}, + {"IFT_SIPTG", Const, 1, ""}, + {"IFT_SLIP", Const, 0, ""}, + {"IFT_SMDSDXI", Const, 0, ""}, + {"IFT_SMDSICIP", Const, 0, ""}, + {"IFT_SONET", Const, 0, ""}, + {"IFT_SONETOVERHEADCHANNEL", Const, 0, ""}, + {"IFT_SONETPATH", Const, 0, ""}, + {"IFT_SONETVT", Const, 0, ""}, + {"IFT_SRP", Const, 0, ""}, + {"IFT_SS7SIGLINK", Const, 0, ""}, + {"IFT_STACKTOSTACK", Const, 0, ""}, + {"IFT_STARLAN", Const, 0, ""}, + {"IFT_STF", Const, 0, ""}, + {"IFT_T1", Const, 0, ""}, + {"IFT_TDLC", Const, 0, ""}, + {"IFT_TELINK", Const, 1, ""}, + {"IFT_TERMPAD", Const, 0, ""}, + {"IFT_TR008", Const, 0, ""}, + {"IFT_TRANSPHDLC", Const, 0, ""}, + {"IFT_TUNNEL", Const, 0, ""}, + {"IFT_ULTRA", Const, 0, ""}, + {"IFT_USB", Const, 0, ""}, + {"IFT_V11", Const, 0, ""}, + {"IFT_V35", Const, 0, ""}, + {"IFT_V36", Const, 0, ""}, + {"IFT_V37", Const, 0, ""}, + {"IFT_VDSL", Const, 0, ""}, + {"IFT_VIRTUALIPADDRESS", Const, 0, ""}, + {"IFT_VIRTUALTG", Const, 1, ""}, + {"IFT_VOICEDID", Const, 1, ""}, + {"IFT_VOICEEM", Const, 0, ""}, + {"IFT_VOICEEMFGD", Const, 1, ""}, + {"IFT_VOICEENCAP", Const, 0, ""}, + {"IFT_VOICEFGDEANA", Const, 1, ""}, + {"IFT_VOICEFXO", Const, 0, ""}, + {"IFT_VOICEFXS", Const, 0, ""}, + {"IFT_VOICEOVERATM", Const, 0, ""}, + {"IFT_VOICEOVERCABLE", Const, 1, ""}, + {"IFT_VOICEOVERFRAMERELAY", Const, 0, ""}, + {"IFT_VOICEOVERIP", Const, 0, ""}, + {"IFT_X213", Const, 0, ""}, + {"IFT_X25", Const, 0, ""}, + {"IFT_X25DDN", Const, 0, ""}, + {"IFT_X25HUNTGROUP", Const, 0, ""}, + {"IFT_X25MLP", Const, 0, ""}, + {"IFT_X25PLE", Const, 0, ""}, + {"IFT_XETHER", Const, 0, ""}, + {"IGNBRK", Const, 0, ""}, + {"IGNCR", Const, 0, ""}, + {"IGNORE", Const, 0, ""}, + {"IGNPAR", Const, 0, ""}, + {"IMAXBEL", Const, 0, ""}, + {"INFINITE", Const, 0, ""}, + 
{"INLCR", Const, 0, ""}, + {"INPCK", Const, 0, ""}, + {"INVALID_FILE_ATTRIBUTES", Const, 0, ""}, + {"IN_ACCESS", Const, 0, ""}, + {"IN_ALL_EVENTS", Const, 0, ""}, + {"IN_ATTRIB", Const, 0, ""}, + {"IN_CLASSA_HOST", Const, 0, ""}, + {"IN_CLASSA_MAX", Const, 0, ""}, + {"IN_CLASSA_NET", Const, 0, ""}, + {"IN_CLASSA_NSHIFT", Const, 0, ""}, + {"IN_CLASSB_HOST", Const, 0, ""}, + {"IN_CLASSB_MAX", Const, 0, ""}, + {"IN_CLASSB_NET", Const, 0, ""}, + {"IN_CLASSB_NSHIFT", Const, 0, ""}, + {"IN_CLASSC_HOST", Const, 0, ""}, + {"IN_CLASSC_NET", Const, 0, ""}, + {"IN_CLASSC_NSHIFT", Const, 0, ""}, + {"IN_CLASSD_HOST", Const, 0, ""}, + {"IN_CLASSD_NET", Const, 0, ""}, + {"IN_CLASSD_NSHIFT", Const, 0, ""}, + {"IN_CLOEXEC", Const, 0, ""}, + {"IN_CLOSE", Const, 0, ""}, + {"IN_CLOSE_NOWRITE", Const, 0, ""}, + {"IN_CLOSE_WRITE", Const, 0, ""}, + {"IN_CREATE", Const, 0, ""}, + {"IN_DELETE", Const, 0, ""}, + {"IN_DELETE_SELF", Const, 0, ""}, + {"IN_DONT_FOLLOW", Const, 0, ""}, + {"IN_EXCL_UNLINK", Const, 0, ""}, + {"IN_IGNORED", Const, 0, ""}, + {"IN_ISDIR", Const, 0, ""}, + {"IN_LINKLOCALNETNUM", Const, 0, ""}, + {"IN_LOOPBACKNET", Const, 0, ""}, + {"IN_MASK_ADD", Const, 0, ""}, + {"IN_MODIFY", Const, 0, ""}, + {"IN_MOVE", Const, 0, ""}, + {"IN_MOVED_FROM", Const, 0, ""}, + {"IN_MOVED_TO", Const, 0, ""}, + {"IN_MOVE_SELF", Const, 0, ""}, + {"IN_NONBLOCK", Const, 0, ""}, + {"IN_ONESHOT", Const, 0, ""}, + {"IN_ONLYDIR", Const, 0, ""}, + {"IN_OPEN", Const, 0, ""}, + {"IN_Q_OVERFLOW", Const, 0, ""}, + {"IN_RFC3021_HOST", Const, 1, ""}, + {"IN_RFC3021_MASK", Const, 1, ""}, + {"IN_RFC3021_NET", Const, 1, ""}, + {"IN_RFC3021_NSHIFT", Const, 1, ""}, + {"IN_UNMOUNT", Const, 0, ""}, + {"IOC_IN", Const, 1, ""}, + {"IOC_INOUT", Const, 1, ""}, + {"IOC_OUT", Const, 1, ""}, + {"IOC_VENDOR", Const, 3, ""}, + {"IOC_WS2", Const, 1, ""}, + {"IO_REPARSE_TAG_SYMLINK", Const, 4, ""}, + {"IPMreq", Type, 0, ""}, + {"IPMreq.Interface", Field, 0, ""}, + {"IPMreq.Multiaddr", Field, 0, ""}, + {"IPMreqn", Type, 0, ""}, + {"IPMreqn.Address", Field, 0, ""}, + {"IPMreqn.Ifindex", Field, 0, ""}, + {"IPMreqn.Multiaddr", Field, 0, ""}, + {"IPPROTO_3PC", Const, 0, ""}, + {"IPPROTO_ADFS", Const, 0, ""}, + {"IPPROTO_AH", Const, 0, ""}, + {"IPPROTO_AHIP", Const, 0, ""}, + {"IPPROTO_APES", Const, 0, ""}, + {"IPPROTO_ARGUS", Const, 0, ""}, + {"IPPROTO_AX25", Const, 0, ""}, + {"IPPROTO_BHA", Const, 0, ""}, + {"IPPROTO_BLT", Const, 0, ""}, + {"IPPROTO_BRSATMON", Const, 0, ""}, + {"IPPROTO_CARP", Const, 0, ""}, + {"IPPROTO_CFTP", Const, 0, ""}, + {"IPPROTO_CHAOS", Const, 0, ""}, + {"IPPROTO_CMTP", Const, 0, ""}, + {"IPPROTO_COMP", Const, 0, ""}, + {"IPPROTO_CPHB", Const, 0, ""}, + {"IPPROTO_CPNX", Const, 0, ""}, + {"IPPROTO_DCCP", Const, 0, ""}, + {"IPPROTO_DDP", Const, 0, ""}, + {"IPPROTO_DGP", Const, 0, ""}, + {"IPPROTO_DIVERT", Const, 0, ""}, + {"IPPROTO_DIVERT_INIT", Const, 3, ""}, + {"IPPROTO_DIVERT_RESP", Const, 3, ""}, + {"IPPROTO_DONE", Const, 0, ""}, + {"IPPROTO_DSTOPTS", Const, 0, ""}, + {"IPPROTO_EGP", Const, 0, ""}, + {"IPPROTO_EMCON", Const, 0, ""}, + {"IPPROTO_ENCAP", Const, 0, ""}, + {"IPPROTO_EON", Const, 0, ""}, + {"IPPROTO_ESP", Const, 0, ""}, + {"IPPROTO_ETHERIP", Const, 0, ""}, + {"IPPROTO_FRAGMENT", Const, 0, ""}, + {"IPPROTO_GGP", Const, 0, ""}, + {"IPPROTO_GMTP", Const, 0, ""}, + {"IPPROTO_GRE", Const, 0, ""}, + {"IPPROTO_HELLO", Const, 0, ""}, + {"IPPROTO_HMP", Const, 0, ""}, + {"IPPROTO_HOPOPTS", Const, 0, ""}, + {"IPPROTO_ICMP", Const, 0, ""}, + {"IPPROTO_ICMPV6", Const, 0, ""}, + {"IPPROTO_IDP", Const, 0, ""}, + 
{"IPPROTO_IDPR", Const, 0, ""}, + {"IPPROTO_IDRP", Const, 0, ""}, + {"IPPROTO_IGMP", Const, 0, ""}, + {"IPPROTO_IGP", Const, 0, ""}, + {"IPPROTO_IGRP", Const, 0, ""}, + {"IPPROTO_IL", Const, 0, ""}, + {"IPPROTO_INLSP", Const, 0, ""}, + {"IPPROTO_INP", Const, 0, ""}, + {"IPPROTO_IP", Const, 0, ""}, + {"IPPROTO_IPCOMP", Const, 0, ""}, + {"IPPROTO_IPCV", Const, 0, ""}, + {"IPPROTO_IPEIP", Const, 0, ""}, + {"IPPROTO_IPIP", Const, 0, ""}, + {"IPPROTO_IPPC", Const, 0, ""}, + {"IPPROTO_IPV4", Const, 0, ""}, + {"IPPROTO_IPV6", Const, 0, ""}, + {"IPPROTO_IPV6_ICMP", Const, 1, ""}, + {"IPPROTO_IRTP", Const, 0, ""}, + {"IPPROTO_KRYPTOLAN", Const, 0, ""}, + {"IPPROTO_LARP", Const, 0, ""}, + {"IPPROTO_LEAF1", Const, 0, ""}, + {"IPPROTO_LEAF2", Const, 0, ""}, + {"IPPROTO_MAX", Const, 0, ""}, + {"IPPROTO_MAXID", Const, 0, ""}, + {"IPPROTO_MEAS", Const, 0, ""}, + {"IPPROTO_MH", Const, 1, ""}, + {"IPPROTO_MHRP", Const, 0, ""}, + {"IPPROTO_MICP", Const, 0, ""}, + {"IPPROTO_MOBILE", Const, 0, ""}, + {"IPPROTO_MPLS", Const, 1, ""}, + {"IPPROTO_MTP", Const, 0, ""}, + {"IPPROTO_MUX", Const, 0, ""}, + {"IPPROTO_ND", Const, 0, ""}, + {"IPPROTO_NHRP", Const, 0, ""}, + {"IPPROTO_NONE", Const, 0, ""}, + {"IPPROTO_NSP", Const, 0, ""}, + {"IPPROTO_NVPII", Const, 0, ""}, + {"IPPROTO_OLD_DIVERT", Const, 0, ""}, + {"IPPROTO_OSPFIGP", Const, 0, ""}, + {"IPPROTO_PFSYNC", Const, 0, ""}, + {"IPPROTO_PGM", Const, 0, ""}, + {"IPPROTO_PIGP", Const, 0, ""}, + {"IPPROTO_PIM", Const, 0, ""}, + {"IPPROTO_PRM", Const, 0, ""}, + {"IPPROTO_PUP", Const, 0, ""}, + {"IPPROTO_PVP", Const, 0, ""}, + {"IPPROTO_RAW", Const, 0, ""}, + {"IPPROTO_RCCMON", Const, 0, ""}, + {"IPPROTO_RDP", Const, 0, ""}, + {"IPPROTO_ROUTING", Const, 0, ""}, + {"IPPROTO_RSVP", Const, 0, ""}, + {"IPPROTO_RVD", Const, 0, ""}, + {"IPPROTO_SATEXPAK", Const, 0, ""}, + {"IPPROTO_SATMON", Const, 0, ""}, + {"IPPROTO_SCCSP", Const, 0, ""}, + {"IPPROTO_SCTP", Const, 0, ""}, + {"IPPROTO_SDRP", Const, 0, ""}, + {"IPPROTO_SEND", Const, 1, ""}, + {"IPPROTO_SEP", Const, 0, ""}, + {"IPPROTO_SKIP", Const, 0, ""}, + {"IPPROTO_SPACER", Const, 0, ""}, + {"IPPROTO_SRPC", Const, 0, ""}, + {"IPPROTO_ST", Const, 0, ""}, + {"IPPROTO_SVMTP", Const, 0, ""}, + {"IPPROTO_SWIPE", Const, 0, ""}, + {"IPPROTO_TCF", Const, 0, ""}, + {"IPPROTO_TCP", Const, 0, ""}, + {"IPPROTO_TLSP", Const, 0, ""}, + {"IPPROTO_TP", Const, 0, ""}, + {"IPPROTO_TPXX", Const, 0, ""}, + {"IPPROTO_TRUNK1", Const, 0, ""}, + {"IPPROTO_TRUNK2", Const, 0, ""}, + {"IPPROTO_TTP", Const, 0, ""}, + {"IPPROTO_UDP", Const, 0, ""}, + {"IPPROTO_UDPLITE", Const, 0, ""}, + {"IPPROTO_VINES", Const, 0, ""}, + {"IPPROTO_VISA", Const, 0, ""}, + {"IPPROTO_VMTP", Const, 0, ""}, + {"IPPROTO_VRRP", Const, 1, ""}, + {"IPPROTO_WBEXPAK", Const, 0, ""}, + {"IPPROTO_WBMON", Const, 0, ""}, + {"IPPROTO_WSN", Const, 0, ""}, + {"IPPROTO_XNET", Const, 0, ""}, + {"IPPROTO_XTP", Const, 0, ""}, + {"IPV6_2292DSTOPTS", Const, 0, ""}, + {"IPV6_2292HOPLIMIT", Const, 0, ""}, + {"IPV6_2292HOPOPTS", Const, 0, ""}, + {"IPV6_2292NEXTHOP", Const, 0, ""}, + {"IPV6_2292PKTINFO", Const, 0, ""}, + {"IPV6_2292PKTOPTIONS", Const, 0, ""}, + {"IPV6_2292RTHDR", Const, 0, ""}, + {"IPV6_ADDRFORM", Const, 0, ""}, + {"IPV6_ADD_MEMBERSHIP", Const, 0, ""}, + {"IPV6_AUTHHDR", Const, 0, ""}, + {"IPV6_AUTH_LEVEL", Const, 1, ""}, + {"IPV6_AUTOFLOWLABEL", Const, 0, ""}, + {"IPV6_BINDANY", Const, 0, ""}, + {"IPV6_BINDV6ONLY", Const, 0, ""}, + {"IPV6_BOUND_IF", Const, 0, ""}, + {"IPV6_CHECKSUM", Const, 0, ""}, + {"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0, ""}, + 
{"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0, ""}, + {"IPV6_DEFHLIM", Const, 0, ""}, + {"IPV6_DONTFRAG", Const, 0, ""}, + {"IPV6_DROP_MEMBERSHIP", Const, 0, ""}, + {"IPV6_DSTOPTS", Const, 0, ""}, + {"IPV6_ESP_NETWORK_LEVEL", Const, 1, ""}, + {"IPV6_ESP_TRANS_LEVEL", Const, 1, ""}, + {"IPV6_FAITH", Const, 0, ""}, + {"IPV6_FLOWINFO_MASK", Const, 0, ""}, + {"IPV6_FLOWLABEL_MASK", Const, 0, ""}, + {"IPV6_FRAGTTL", Const, 0, ""}, + {"IPV6_FW_ADD", Const, 0, ""}, + {"IPV6_FW_DEL", Const, 0, ""}, + {"IPV6_FW_FLUSH", Const, 0, ""}, + {"IPV6_FW_GET", Const, 0, ""}, + {"IPV6_FW_ZERO", Const, 0, ""}, + {"IPV6_HLIMDEC", Const, 0, ""}, + {"IPV6_HOPLIMIT", Const, 0, ""}, + {"IPV6_HOPOPTS", Const, 0, ""}, + {"IPV6_IPCOMP_LEVEL", Const, 1, ""}, + {"IPV6_IPSEC_POLICY", Const, 0, ""}, + {"IPV6_JOIN_ANYCAST", Const, 0, ""}, + {"IPV6_JOIN_GROUP", Const, 0, ""}, + {"IPV6_LEAVE_ANYCAST", Const, 0, ""}, + {"IPV6_LEAVE_GROUP", Const, 0, ""}, + {"IPV6_MAXHLIM", Const, 0, ""}, + {"IPV6_MAXOPTHDR", Const, 0, ""}, + {"IPV6_MAXPACKET", Const, 0, ""}, + {"IPV6_MAX_GROUP_SRC_FILTER", Const, 0, ""}, + {"IPV6_MAX_MEMBERSHIPS", Const, 0, ""}, + {"IPV6_MAX_SOCK_SRC_FILTER", Const, 0, ""}, + {"IPV6_MIN_MEMBERSHIPS", Const, 0, ""}, + {"IPV6_MMTU", Const, 0, ""}, + {"IPV6_MSFILTER", Const, 0, ""}, + {"IPV6_MTU", Const, 0, ""}, + {"IPV6_MTU_DISCOVER", Const, 0, ""}, + {"IPV6_MULTICAST_HOPS", Const, 0, ""}, + {"IPV6_MULTICAST_IF", Const, 0, ""}, + {"IPV6_MULTICAST_LOOP", Const, 0, ""}, + {"IPV6_NEXTHOP", Const, 0, ""}, + {"IPV6_OPTIONS", Const, 1, ""}, + {"IPV6_PATHMTU", Const, 0, ""}, + {"IPV6_PIPEX", Const, 1, ""}, + {"IPV6_PKTINFO", Const, 0, ""}, + {"IPV6_PMTUDISC_DO", Const, 0, ""}, + {"IPV6_PMTUDISC_DONT", Const, 0, ""}, + {"IPV6_PMTUDISC_PROBE", Const, 0, ""}, + {"IPV6_PMTUDISC_WANT", Const, 0, ""}, + {"IPV6_PORTRANGE", Const, 0, ""}, + {"IPV6_PORTRANGE_DEFAULT", Const, 0, ""}, + {"IPV6_PORTRANGE_HIGH", Const, 0, ""}, + {"IPV6_PORTRANGE_LOW", Const, 0, ""}, + {"IPV6_PREFER_TEMPADDR", Const, 0, ""}, + {"IPV6_RECVDSTOPTS", Const, 0, ""}, + {"IPV6_RECVDSTPORT", Const, 3, ""}, + {"IPV6_RECVERR", Const, 0, ""}, + {"IPV6_RECVHOPLIMIT", Const, 0, ""}, + {"IPV6_RECVHOPOPTS", Const, 0, ""}, + {"IPV6_RECVPATHMTU", Const, 0, ""}, + {"IPV6_RECVPKTINFO", Const, 0, ""}, + {"IPV6_RECVRTHDR", Const, 0, ""}, + {"IPV6_RECVTCLASS", Const, 0, ""}, + {"IPV6_ROUTER_ALERT", Const, 0, ""}, + {"IPV6_RTABLE", Const, 1, ""}, + {"IPV6_RTHDR", Const, 0, ""}, + {"IPV6_RTHDRDSTOPTS", Const, 0, ""}, + {"IPV6_RTHDR_LOOSE", Const, 0, ""}, + {"IPV6_RTHDR_STRICT", Const, 0, ""}, + {"IPV6_RTHDR_TYPE_0", Const, 0, ""}, + {"IPV6_RXDSTOPTS", Const, 0, ""}, + {"IPV6_RXHOPOPTS", Const, 0, ""}, + {"IPV6_SOCKOPT_RESERVED1", Const, 0, ""}, + {"IPV6_TCLASS", Const, 0, ""}, + {"IPV6_UNICAST_HOPS", Const, 0, ""}, + {"IPV6_USE_MIN_MTU", Const, 0, ""}, + {"IPV6_V6ONLY", Const, 0, ""}, + {"IPV6_VERSION", Const, 0, ""}, + {"IPV6_VERSION_MASK", Const, 0, ""}, + {"IPV6_XFRM_POLICY", Const, 0, ""}, + {"IP_ADD_MEMBERSHIP", Const, 0, ""}, + {"IP_ADD_SOURCE_MEMBERSHIP", Const, 0, ""}, + {"IP_AUTH_LEVEL", Const, 1, ""}, + {"IP_BINDANY", Const, 0, ""}, + {"IP_BLOCK_SOURCE", Const, 0, ""}, + {"IP_BOUND_IF", Const, 0, ""}, + {"IP_DEFAULT_MULTICAST_LOOP", Const, 0, ""}, + {"IP_DEFAULT_MULTICAST_TTL", Const, 0, ""}, + {"IP_DF", Const, 0, ""}, + {"IP_DIVERTFL", Const, 3, ""}, + {"IP_DONTFRAG", Const, 0, ""}, + {"IP_DROP_MEMBERSHIP", Const, 0, ""}, + {"IP_DROP_SOURCE_MEMBERSHIP", Const, 0, ""}, + {"IP_DUMMYNET3", Const, 0, ""}, + {"IP_DUMMYNET_CONFIGURE", Const, 0, ""}, + 
{"IP_DUMMYNET_DEL", Const, 0, ""}, + {"IP_DUMMYNET_FLUSH", Const, 0, ""}, + {"IP_DUMMYNET_GET", Const, 0, ""}, + {"IP_EF", Const, 1, ""}, + {"IP_ERRORMTU", Const, 1, ""}, + {"IP_ESP_NETWORK_LEVEL", Const, 1, ""}, + {"IP_ESP_TRANS_LEVEL", Const, 1, ""}, + {"IP_FAITH", Const, 0, ""}, + {"IP_FREEBIND", Const, 0, ""}, + {"IP_FW3", Const, 0, ""}, + {"IP_FW_ADD", Const, 0, ""}, + {"IP_FW_DEL", Const, 0, ""}, + {"IP_FW_FLUSH", Const, 0, ""}, + {"IP_FW_GET", Const, 0, ""}, + {"IP_FW_NAT_CFG", Const, 0, ""}, + {"IP_FW_NAT_DEL", Const, 0, ""}, + {"IP_FW_NAT_GET_CONFIG", Const, 0, ""}, + {"IP_FW_NAT_GET_LOG", Const, 0, ""}, + {"IP_FW_RESETLOG", Const, 0, ""}, + {"IP_FW_TABLE_ADD", Const, 0, ""}, + {"IP_FW_TABLE_DEL", Const, 0, ""}, + {"IP_FW_TABLE_FLUSH", Const, 0, ""}, + {"IP_FW_TABLE_GETSIZE", Const, 0, ""}, + {"IP_FW_TABLE_LIST", Const, 0, ""}, + {"IP_FW_ZERO", Const, 0, ""}, + {"IP_HDRINCL", Const, 0, ""}, + {"IP_IPCOMP_LEVEL", Const, 1, ""}, + {"IP_IPSECFLOWINFO", Const, 1, ""}, + {"IP_IPSEC_LOCAL_AUTH", Const, 1, ""}, + {"IP_IPSEC_LOCAL_CRED", Const, 1, ""}, + {"IP_IPSEC_LOCAL_ID", Const, 1, ""}, + {"IP_IPSEC_POLICY", Const, 0, ""}, + {"IP_IPSEC_REMOTE_AUTH", Const, 1, ""}, + {"IP_IPSEC_REMOTE_CRED", Const, 1, ""}, + {"IP_IPSEC_REMOTE_ID", Const, 1, ""}, + {"IP_MAXPACKET", Const, 0, ""}, + {"IP_MAX_GROUP_SRC_FILTER", Const, 0, ""}, + {"IP_MAX_MEMBERSHIPS", Const, 0, ""}, + {"IP_MAX_SOCK_MUTE_FILTER", Const, 0, ""}, + {"IP_MAX_SOCK_SRC_FILTER", Const, 0, ""}, + {"IP_MAX_SOURCE_FILTER", Const, 0, ""}, + {"IP_MF", Const, 0, ""}, + {"IP_MINFRAGSIZE", Const, 1, ""}, + {"IP_MINTTL", Const, 0, ""}, + {"IP_MIN_MEMBERSHIPS", Const, 0, ""}, + {"IP_MSFILTER", Const, 0, ""}, + {"IP_MSS", Const, 0, ""}, + {"IP_MTU", Const, 0, ""}, + {"IP_MTU_DISCOVER", Const, 0, ""}, + {"IP_MULTICAST_IF", Const, 0, ""}, + {"IP_MULTICAST_IFINDEX", Const, 0, ""}, + {"IP_MULTICAST_LOOP", Const, 0, ""}, + {"IP_MULTICAST_TTL", Const, 0, ""}, + {"IP_MULTICAST_VIF", Const, 0, ""}, + {"IP_NAT__XXX", Const, 0, ""}, + {"IP_OFFMASK", Const, 0, ""}, + {"IP_OLD_FW_ADD", Const, 0, ""}, + {"IP_OLD_FW_DEL", Const, 0, ""}, + {"IP_OLD_FW_FLUSH", Const, 0, ""}, + {"IP_OLD_FW_GET", Const, 0, ""}, + {"IP_OLD_FW_RESETLOG", Const, 0, ""}, + {"IP_OLD_FW_ZERO", Const, 0, ""}, + {"IP_ONESBCAST", Const, 0, ""}, + {"IP_OPTIONS", Const, 0, ""}, + {"IP_ORIGDSTADDR", Const, 0, ""}, + {"IP_PASSSEC", Const, 0, ""}, + {"IP_PIPEX", Const, 1, ""}, + {"IP_PKTINFO", Const, 0, ""}, + {"IP_PKTOPTIONS", Const, 0, ""}, + {"IP_PMTUDISC", Const, 0, ""}, + {"IP_PMTUDISC_DO", Const, 0, ""}, + {"IP_PMTUDISC_DONT", Const, 0, ""}, + {"IP_PMTUDISC_PROBE", Const, 0, ""}, + {"IP_PMTUDISC_WANT", Const, 0, ""}, + {"IP_PORTRANGE", Const, 0, ""}, + {"IP_PORTRANGE_DEFAULT", Const, 0, ""}, + {"IP_PORTRANGE_HIGH", Const, 0, ""}, + {"IP_PORTRANGE_LOW", Const, 0, ""}, + {"IP_RECVDSTADDR", Const, 0, ""}, + {"IP_RECVDSTPORT", Const, 1, ""}, + {"IP_RECVERR", Const, 0, ""}, + {"IP_RECVIF", Const, 0, ""}, + {"IP_RECVOPTS", Const, 0, ""}, + {"IP_RECVORIGDSTADDR", Const, 0, ""}, + {"IP_RECVPKTINFO", Const, 0, ""}, + {"IP_RECVRETOPTS", Const, 0, ""}, + {"IP_RECVRTABLE", Const, 1, ""}, + {"IP_RECVTOS", Const, 0, ""}, + {"IP_RECVTTL", Const, 0, ""}, + {"IP_RETOPTS", Const, 0, ""}, + {"IP_RF", Const, 0, ""}, + {"IP_ROUTER_ALERT", Const, 0, ""}, + {"IP_RSVP_OFF", Const, 0, ""}, + {"IP_RSVP_ON", Const, 0, ""}, + {"IP_RSVP_VIF_OFF", Const, 0, ""}, + {"IP_RSVP_VIF_ON", Const, 0, ""}, + {"IP_RTABLE", Const, 1, ""}, + {"IP_SENDSRCADDR", Const, 0, ""}, + {"IP_STRIPHDR", Const, 0, ""}, + {"IP_TOS", 
Const, 0, ""}, + {"IP_TRAFFIC_MGT_BACKGROUND", Const, 0, ""}, + {"IP_TRANSPARENT", Const, 0, ""}, + {"IP_TTL", Const, 0, ""}, + {"IP_UNBLOCK_SOURCE", Const, 0, ""}, + {"IP_XFRM_POLICY", Const, 0, ""}, + {"IPv6MTUInfo", Type, 2, ""}, + {"IPv6MTUInfo.Addr", Field, 2, ""}, + {"IPv6MTUInfo.Mtu", Field, 2, ""}, + {"IPv6Mreq", Type, 0, ""}, + {"IPv6Mreq.Interface", Field, 0, ""}, + {"IPv6Mreq.Multiaddr", Field, 0, ""}, + {"ISIG", Const, 0, ""}, + {"ISTRIP", Const, 0, ""}, + {"IUCLC", Const, 0, ""}, + {"IUTF8", Const, 0, ""}, + {"IXANY", Const, 0, ""}, + {"IXOFF", Const, 0, ""}, + {"IXON", Const, 0, ""}, + {"IfAddrmsg", Type, 0, ""}, + {"IfAddrmsg.Family", Field, 0, ""}, + {"IfAddrmsg.Flags", Field, 0, ""}, + {"IfAddrmsg.Index", Field, 0, ""}, + {"IfAddrmsg.Prefixlen", Field, 0, ""}, + {"IfAddrmsg.Scope", Field, 0, ""}, + {"IfAnnounceMsghdr", Type, 1, ""}, + {"IfAnnounceMsghdr.Hdrlen", Field, 2, ""}, + {"IfAnnounceMsghdr.Index", Field, 1, ""}, + {"IfAnnounceMsghdr.Msglen", Field, 1, ""}, + {"IfAnnounceMsghdr.Name", Field, 1, ""}, + {"IfAnnounceMsghdr.Type", Field, 1, ""}, + {"IfAnnounceMsghdr.Version", Field, 1, ""}, + {"IfAnnounceMsghdr.What", Field, 1, ""}, + {"IfData", Type, 0, ""}, + {"IfData.Addrlen", Field, 0, ""}, + {"IfData.Baudrate", Field, 0, ""}, + {"IfData.Capabilities", Field, 2, ""}, + {"IfData.Collisions", Field, 0, ""}, + {"IfData.Datalen", Field, 0, ""}, + {"IfData.Epoch", Field, 0, ""}, + {"IfData.Hdrlen", Field, 0, ""}, + {"IfData.Hwassist", Field, 0, ""}, + {"IfData.Ibytes", Field, 0, ""}, + {"IfData.Ierrors", Field, 0, ""}, + {"IfData.Imcasts", Field, 0, ""}, + {"IfData.Ipackets", Field, 0, ""}, + {"IfData.Iqdrops", Field, 0, ""}, + {"IfData.Lastchange", Field, 0, ""}, + {"IfData.Link_state", Field, 0, ""}, + {"IfData.Mclpool", Field, 2, ""}, + {"IfData.Metric", Field, 0, ""}, + {"IfData.Mtu", Field, 0, ""}, + {"IfData.Noproto", Field, 0, ""}, + {"IfData.Obytes", Field, 0, ""}, + {"IfData.Oerrors", Field, 0, ""}, + {"IfData.Omcasts", Field, 0, ""}, + {"IfData.Opackets", Field, 0, ""}, + {"IfData.Pad", Field, 2, ""}, + {"IfData.Pad_cgo_0", Field, 2, ""}, + {"IfData.Pad_cgo_1", Field, 2, ""}, + {"IfData.Physical", Field, 0, ""}, + {"IfData.Recvquota", Field, 0, ""}, + {"IfData.Recvtiming", Field, 0, ""}, + {"IfData.Reserved1", Field, 0, ""}, + {"IfData.Reserved2", Field, 0, ""}, + {"IfData.Spare_char1", Field, 0, ""}, + {"IfData.Spare_char2", Field, 0, ""}, + {"IfData.Type", Field, 0, ""}, + {"IfData.Typelen", Field, 0, ""}, + {"IfData.Unused1", Field, 0, ""}, + {"IfData.Unused2", Field, 0, ""}, + {"IfData.Xmitquota", Field, 0, ""}, + {"IfData.Xmittiming", Field, 0, ""}, + {"IfInfomsg", Type, 0, ""}, + {"IfInfomsg.Change", Field, 0, ""}, + {"IfInfomsg.Family", Field, 0, ""}, + {"IfInfomsg.Flags", Field, 0, ""}, + {"IfInfomsg.Index", Field, 0, ""}, + {"IfInfomsg.Type", Field, 0, ""}, + {"IfInfomsg.X__ifi_pad", Field, 0, ""}, + {"IfMsghdr", Type, 0, ""}, + {"IfMsghdr.Addrs", Field, 0, ""}, + {"IfMsghdr.Data", Field, 0, ""}, + {"IfMsghdr.Flags", Field, 0, ""}, + {"IfMsghdr.Hdrlen", Field, 2, ""}, + {"IfMsghdr.Index", Field, 0, ""}, + {"IfMsghdr.Msglen", Field, 0, ""}, + {"IfMsghdr.Pad1", Field, 2, ""}, + {"IfMsghdr.Pad2", Field, 2, ""}, + {"IfMsghdr.Pad_cgo_0", Field, 0, ""}, + {"IfMsghdr.Pad_cgo_1", Field, 2, ""}, + {"IfMsghdr.Tableid", Field, 2, ""}, + {"IfMsghdr.Type", Field, 0, ""}, + {"IfMsghdr.Version", Field, 0, ""}, + {"IfMsghdr.Xflags", Field, 2, ""}, + {"IfaMsghdr", Type, 0, ""}, + {"IfaMsghdr.Addrs", Field, 0, ""}, + {"IfaMsghdr.Flags", Field, 0, ""}, + 
{"IfaMsghdr.Hdrlen", Field, 2, ""}, + {"IfaMsghdr.Index", Field, 0, ""}, + {"IfaMsghdr.Metric", Field, 0, ""}, + {"IfaMsghdr.Msglen", Field, 0, ""}, + {"IfaMsghdr.Pad1", Field, 2, ""}, + {"IfaMsghdr.Pad2", Field, 2, ""}, + {"IfaMsghdr.Pad_cgo_0", Field, 0, ""}, + {"IfaMsghdr.Tableid", Field, 2, ""}, + {"IfaMsghdr.Type", Field, 0, ""}, + {"IfaMsghdr.Version", Field, 0, ""}, + {"IfmaMsghdr", Type, 0, ""}, + {"IfmaMsghdr.Addrs", Field, 0, ""}, + {"IfmaMsghdr.Flags", Field, 0, ""}, + {"IfmaMsghdr.Index", Field, 0, ""}, + {"IfmaMsghdr.Msglen", Field, 0, ""}, + {"IfmaMsghdr.Pad_cgo_0", Field, 0, ""}, + {"IfmaMsghdr.Type", Field, 0, ""}, + {"IfmaMsghdr.Version", Field, 0, ""}, + {"IfmaMsghdr2", Type, 0, ""}, + {"IfmaMsghdr2.Addrs", Field, 0, ""}, + {"IfmaMsghdr2.Flags", Field, 0, ""}, + {"IfmaMsghdr2.Index", Field, 0, ""}, + {"IfmaMsghdr2.Msglen", Field, 0, ""}, + {"IfmaMsghdr2.Pad_cgo_0", Field, 0, ""}, + {"IfmaMsghdr2.Refcount", Field, 0, ""}, + {"IfmaMsghdr2.Type", Field, 0, ""}, + {"IfmaMsghdr2.Version", Field, 0, ""}, + {"ImplementsGetwd", Const, 0, ""}, + {"Inet4Pktinfo", Type, 0, ""}, + {"Inet4Pktinfo.Addr", Field, 0, ""}, + {"Inet4Pktinfo.Ifindex", Field, 0, ""}, + {"Inet4Pktinfo.Spec_dst", Field, 0, ""}, + {"Inet6Pktinfo", Type, 0, ""}, + {"Inet6Pktinfo.Addr", Field, 0, ""}, + {"Inet6Pktinfo.Ifindex", Field, 0, ""}, + {"InotifyAddWatch", Func, 0, "func(fd int, pathname string, mask uint32) (watchdesc int, err error)"}, + {"InotifyEvent", Type, 0, ""}, + {"InotifyEvent.Cookie", Field, 0, ""}, + {"InotifyEvent.Len", Field, 0, ""}, + {"InotifyEvent.Mask", Field, 0, ""}, + {"InotifyEvent.Name", Field, 0, ""}, + {"InotifyEvent.Wd", Field, 0, ""}, + {"InotifyInit", Func, 0, "func() (fd int, err error)"}, + {"InotifyInit1", Func, 0, "func(flags int) (fd int, err error)"}, + {"InotifyRmWatch", Func, 0, "func(fd int, watchdesc uint32) (success int, err error)"}, + {"InterfaceAddrMessage", Type, 0, ""}, + {"InterfaceAddrMessage.Data", Field, 0, ""}, + {"InterfaceAddrMessage.Header", Field, 0, ""}, + {"InterfaceAnnounceMessage", Type, 1, ""}, + {"InterfaceAnnounceMessage.Header", Field, 1, ""}, + {"InterfaceInfo", Type, 0, ""}, + {"InterfaceInfo.Address", Field, 0, ""}, + {"InterfaceInfo.BroadcastAddress", Field, 0, ""}, + {"InterfaceInfo.Flags", Field, 0, ""}, + {"InterfaceInfo.Netmask", Field, 0, ""}, + {"InterfaceMessage", Type, 0, ""}, + {"InterfaceMessage.Data", Field, 0, ""}, + {"InterfaceMessage.Header", Field, 0, ""}, + {"InterfaceMulticastAddrMessage", Type, 0, ""}, + {"InterfaceMulticastAddrMessage.Data", Field, 0, ""}, + {"InterfaceMulticastAddrMessage.Header", Field, 0, ""}, + {"InvalidHandle", Const, 0, ""}, + {"Ioperm", Func, 0, "func(from int, num int, on int) (err error)"}, + {"Iopl", Func, 0, "func(level int) (err error)"}, + {"Iovec", Type, 0, ""}, + {"Iovec.Base", Field, 0, ""}, + {"Iovec.Len", Field, 0, ""}, + {"IpAdapterInfo", Type, 0, ""}, + {"IpAdapterInfo.AdapterName", Field, 0, ""}, + {"IpAdapterInfo.Address", Field, 0, ""}, + {"IpAdapterInfo.AddressLength", Field, 0, ""}, + {"IpAdapterInfo.ComboIndex", Field, 0, ""}, + {"IpAdapterInfo.CurrentIpAddress", Field, 0, ""}, + {"IpAdapterInfo.Description", Field, 0, ""}, + {"IpAdapterInfo.DhcpEnabled", Field, 0, ""}, + {"IpAdapterInfo.DhcpServer", Field, 0, ""}, + {"IpAdapterInfo.GatewayList", Field, 0, ""}, + {"IpAdapterInfo.HaveWins", Field, 0, ""}, + {"IpAdapterInfo.Index", Field, 0, ""}, + {"IpAdapterInfo.IpAddressList", Field, 0, ""}, + {"IpAdapterInfo.LeaseExpires", Field, 0, ""}, + {"IpAdapterInfo.LeaseObtained", Field, 0, 
""}, + {"IpAdapterInfo.Next", Field, 0, ""}, + {"IpAdapterInfo.PrimaryWinsServer", Field, 0, ""}, + {"IpAdapterInfo.SecondaryWinsServer", Field, 0, ""}, + {"IpAdapterInfo.Type", Field, 0, ""}, + {"IpAddrString", Type, 0, ""}, + {"IpAddrString.Context", Field, 0, ""}, + {"IpAddrString.IpAddress", Field, 0, ""}, + {"IpAddrString.IpMask", Field, 0, ""}, + {"IpAddrString.Next", Field, 0, ""}, + {"IpAddressString", Type, 0, ""}, + {"IpAddressString.String", Field, 0, ""}, + {"IpMaskString", Type, 0, ""}, + {"IpMaskString.String", Field, 2, ""}, + {"Issetugid", Func, 0, ""}, + {"KEY_ALL_ACCESS", Const, 0, ""}, + {"KEY_CREATE_LINK", Const, 0, ""}, + {"KEY_CREATE_SUB_KEY", Const, 0, ""}, + {"KEY_ENUMERATE_SUB_KEYS", Const, 0, ""}, + {"KEY_EXECUTE", Const, 0, ""}, + {"KEY_NOTIFY", Const, 0, ""}, + {"KEY_QUERY_VALUE", Const, 0, ""}, + {"KEY_READ", Const, 0, ""}, + {"KEY_SET_VALUE", Const, 0, ""}, + {"KEY_WOW64_32KEY", Const, 0, ""}, + {"KEY_WOW64_64KEY", Const, 0, ""}, + {"KEY_WRITE", Const, 0, ""}, + {"Kevent", Func, 0, ""}, + {"Kevent_t", Type, 0, ""}, + {"Kevent_t.Data", Field, 0, ""}, + {"Kevent_t.Fflags", Field, 0, ""}, + {"Kevent_t.Filter", Field, 0, ""}, + {"Kevent_t.Flags", Field, 0, ""}, + {"Kevent_t.Ident", Field, 0, ""}, + {"Kevent_t.Pad_cgo_0", Field, 2, ""}, + {"Kevent_t.Udata", Field, 0, ""}, + {"Kill", Func, 0, "func(pid int, sig Signal) (err error)"}, + {"Klogctl", Func, 0, "func(typ int, buf []byte) (n int, err error)"}, + {"Kqueue", Func, 0, ""}, + {"LANG_ENGLISH", Const, 0, ""}, + {"LAYERED_PROTOCOL", Const, 2, ""}, + {"LCNT_OVERLOAD_FLUSH", Const, 1, ""}, + {"LINUX_REBOOT_CMD_CAD_OFF", Const, 0, ""}, + {"LINUX_REBOOT_CMD_CAD_ON", Const, 0, ""}, + {"LINUX_REBOOT_CMD_HALT", Const, 0, ""}, + {"LINUX_REBOOT_CMD_KEXEC", Const, 0, ""}, + {"LINUX_REBOOT_CMD_POWER_OFF", Const, 0, ""}, + {"LINUX_REBOOT_CMD_RESTART", Const, 0, ""}, + {"LINUX_REBOOT_CMD_RESTART2", Const, 0, ""}, + {"LINUX_REBOOT_CMD_SW_SUSPEND", Const, 0, ""}, + {"LINUX_REBOOT_MAGIC1", Const, 0, ""}, + {"LINUX_REBOOT_MAGIC2", Const, 0, ""}, + {"LOCK_EX", Const, 0, ""}, + {"LOCK_NB", Const, 0, ""}, + {"LOCK_SH", Const, 0, ""}, + {"LOCK_UN", Const, 0, ""}, + {"LazyDLL", Type, 0, ""}, + {"LazyDLL.Name", Field, 0, ""}, + {"LazyProc", Type, 0, ""}, + {"LazyProc.Name", Field, 0, ""}, + {"Lchown", Func, 0, "func(path string, uid int, gid int) (err error)"}, + {"Linger", Type, 0, ""}, + {"Linger.Linger", Field, 0, ""}, + {"Linger.Onoff", Field, 0, ""}, + {"Link", Func, 0, "func(oldpath string, newpath string) (err error)"}, + {"Listen", Func, 0, "func(s int, n int) (err error)"}, + {"Listxattr", Func, 1, "func(path string, dest []byte) (sz int, err error)"}, + {"LoadCancelIoEx", Func, 1, ""}, + {"LoadConnectEx", Func, 1, ""}, + {"LoadCreateSymbolicLink", Func, 4, ""}, + {"LoadDLL", Func, 0, ""}, + {"LoadGetAddrInfo", Func, 1, ""}, + {"LoadLibrary", Func, 0, ""}, + {"LoadSetFileCompletionNotificationModes", Func, 2, ""}, + {"LocalFree", Func, 0, ""}, + {"Log2phys_t", Type, 0, ""}, + {"Log2phys_t.Contigbytes", Field, 0, ""}, + {"Log2phys_t.Devoffset", Field, 0, ""}, + {"Log2phys_t.Flags", Field, 0, ""}, + {"LookupAccountName", Func, 0, ""}, + {"LookupAccountSid", Func, 0, ""}, + {"LookupSID", Func, 0, ""}, + {"LsfJump", Func, 0, "func(code int, k int, jt int, jf int) *SockFilter"}, + {"LsfSocket", Func, 0, "func(ifindex int, proto int) (int, error)"}, + {"LsfStmt", Func, 0, "func(code int, k int) *SockFilter"}, + {"Lstat", Func, 0, "func(path string, stat *Stat_t) (err error)"}, + {"MADV_AUTOSYNC", Const, 1, ""}, + {"MADV_CAN_REUSE", 
Const, 0, ""}, + {"MADV_CORE", Const, 1, ""}, + {"MADV_DOFORK", Const, 0, ""}, + {"MADV_DONTFORK", Const, 0, ""}, + {"MADV_DONTNEED", Const, 0, ""}, + {"MADV_FREE", Const, 0, ""}, + {"MADV_FREE_REUSABLE", Const, 0, ""}, + {"MADV_FREE_REUSE", Const, 0, ""}, + {"MADV_HUGEPAGE", Const, 0, ""}, + {"MADV_HWPOISON", Const, 0, ""}, + {"MADV_MERGEABLE", Const, 0, ""}, + {"MADV_NOCORE", Const, 1, ""}, + {"MADV_NOHUGEPAGE", Const, 0, ""}, + {"MADV_NORMAL", Const, 0, ""}, + {"MADV_NOSYNC", Const, 1, ""}, + {"MADV_PROTECT", Const, 1, ""}, + {"MADV_RANDOM", Const, 0, ""}, + {"MADV_REMOVE", Const, 0, ""}, + {"MADV_SEQUENTIAL", Const, 0, ""}, + {"MADV_SPACEAVAIL", Const, 3, ""}, + {"MADV_UNMERGEABLE", Const, 0, ""}, + {"MADV_WILLNEED", Const, 0, ""}, + {"MADV_ZERO_WIRED_PAGES", Const, 0, ""}, + {"MAP_32BIT", Const, 0, ""}, + {"MAP_ALIGNED_SUPER", Const, 3, ""}, + {"MAP_ALIGNMENT_16MB", Const, 3, ""}, + {"MAP_ALIGNMENT_1TB", Const, 3, ""}, + {"MAP_ALIGNMENT_256TB", Const, 3, ""}, + {"MAP_ALIGNMENT_4GB", Const, 3, ""}, + {"MAP_ALIGNMENT_64KB", Const, 3, ""}, + {"MAP_ALIGNMENT_64PB", Const, 3, ""}, + {"MAP_ALIGNMENT_MASK", Const, 3, ""}, + {"MAP_ALIGNMENT_SHIFT", Const, 3, ""}, + {"MAP_ANON", Const, 0, ""}, + {"MAP_ANONYMOUS", Const, 0, ""}, + {"MAP_COPY", Const, 0, ""}, + {"MAP_DENYWRITE", Const, 0, ""}, + {"MAP_EXECUTABLE", Const, 0, ""}, + {"MAP_FILE", Const, 0, ""}, + {"MAP_FIXED", Const, 0, ""}, + {"MAP_FLAGMASK", Const, 3, ""}, + {"MAP_GROWSDOWN", Const, 0, ""}, + {"MAP_HASSEMAPHORE", Const, 0, ""}, + {"MAP_HUGETLB", Const, 0, ""}, + {"MAP_INHERIT", Const, 3, ""}, + {"MAP_INHERIT_COPY", Const, 3, ""}, + {"MAP_INHERIT_DEFAULT", Const, 3, ""}, + {"MAP_INHERIT_DONATE_COPY", Const, 3, ""}, + {"MAP_INHERIT_NONE", Const, 3, ""}, + {"MAP_INHERIT_SHARE", Const, 3, ""}, + {"MAP_JIT", Const, 0, ""}, + {"MAP_LOCKED", Const, 0, ""}, + {"MAP_NOCACHE", Const, 0, ""}, + {"MAP_NOCORE", Const, 1, ""}, + {"MAP_NOEXTEND", Const, 0, ""}, + {"MAP_NONBLOCK", Const, 0, ""}, + {"MAP_NORESERVE", Const, 0, ""}, + {"MAP_NOSYNC", Const, 1, ""}, + {"MAP_POPULATE", Const, 0, ""}, + {"MAP_PREFAULT_READ", Const, 1, ""}, + {"MAP_PRIVATE", Const, 0, ""}, + {"MAP_RENAME", Const, 0, ""}, + {"MAP_RESERVED0080", Const, 0, ""}, + {"MAP_RESERVED0100", Const, 1, ""}, + {"MAP_SHARED", Const, 0, ""}, + {"MAP_STACK", Const, 0, ""}, + {"MAP_TRYFIXED", Const, 3, ""}, + {"MAP_TYPE", Const, 0, ""}, + {"MAP_WIRED", Const, 3, ""}, + {"MAXIMUM_REPARSE_DATA_BUFFER_SIZE", Const, 4, ""}, + {"MAXLEN_IFDESCR", Const, 0, ""}, + {"MAXLEN_PHYSADDR", Const, 0, ""}, + {"MAX_ADAPTER_ADDRESS_LENGTH", Const, 0, ""}, + {"MAX_ADAPTER_DESCRIPTION_LENGTH", Const, 0, ""}, + {"MAX_ADAPTER_NAME_LENGTH", Const, 0, ""}, + {"MAX_COMPUTERNAME_LENGTH", Const, 0, ""}, + {"MAX_INTERFACE_NAME_LEN", Const, 0, ""}, + {"MAX_LONG_PATH", Const, 0, ""}, + {"MAX_PATH", Const, 0, ""}, + {"MAX_PROTOCOL_CHAIN", Const, 2, ""}, + {"MCL_CURRENT", Const, 0, ""}, + {"MCL_FUTURE", Const, 0, ""}, + {"MNT_DETACH", Const, 0, ""}, + {"MNT_EXPIRE", Const, 0, ""}, + {"MNT_FORCE", Const, 0, ""}, + {"MSG_BCAST", Const, 1, ""}, + {"MSG_CMSG_CLOEXEC", Const, 0, ""}, + {"MSG_COMPAT", Const, 0, ""}, + {"MSG_CONFIRM", Const, 0, ""}, + {"MSG_CONTROLMBUF", Const, 1, ""}, + {"MSG_CTRUNC", Const, 0, ""}, + {"MSG_DONTROUTE", Const, 0, ""}, + {"MSG_DONTWAIT", Const, 0, ""}, + {"MSG_EOF", Const, 0, ""}, + {"MSG_EOR", Const, 0, ""}, + {"MSG_ERRQUEUE", Const, 0, ""}, + {"MSG_FASTOPEN", Const, 1, ""}, + {"MSG_FIN", Const, 0, ""}, + {"MSG_FLUSH", Const, 0, ""}, + {"MSG_HAVEMORE", Const, 0, ""}, + {"MSG_HOLD", Const, 
0, ""}, + {"MSG_IOVUSRSPACE", Const, 1, ""}, + {"MSG_LENUSRSPACE", Const, 1, ""}, + {"MSG_MCAST", Const, 1, ""}, + {"MSG_MORE", Const, 0, ""}, + {"MSG_NAMEMBUF", Const, 1, ""}, + {"MSG_NBIO", Const, 0, ""}, + {"MSG_NEEDSA", Const, 0, ""}, + {"MSG_NOSIGNAL", Const, 0, ""}, + {"MSG_NOTIFICATION", Const, 0, ""}, + {"MSG_OOB", Const, 0, ""}, + {"MSG_PEEK", Const, 0, ""}, + {"MSG_PROXY", Const, 0, ""}, + {"MSG_RCVMORE", Const, 0, ""}, + {"MSG_RST", Const, 0, ""}, + {"MSG_SEND", Const, 0, ""}, + {"MSG_SYN", Const, 0, ""}, + {"MSG_TRUNC", Const, 0, ""}, + {"MSG_TRYHARD", Const, 0, ""}, + {"MSG_USERFLAGS", Const, 1, ""}, + {"MSG_WAITALL", Const, 0, ""}, + {"MSG_WAITFORONE", Const, 0, ""}, + {"MSG_WAITSTREAM", Const, 0, ""}, + {"MS_ACTIVE", Const, 0, ""}, + {"MS_ASYNC", Const, 0, ""}, + {"MS_BIND", Const, 0, ""}, + {"MS_DEACTIVATE", Const, 0, ""}, + {"MS_DIRSYNC", Const, 0, ""}, + {"MS_INVALIDATE", Const, 0, ""}, + {"MS_I_VERSION", Const, 0, ""}, + {"MS_KERNMOUNT", Const, 0, ""}, + {"MS_KILLPAGES", Const, 0, ""}, + {"MS_MANDLOCK", Const, 0, ""}, + {"MS_MGC_MSK", Const, 0, ""}, + {"MS_MGC_VAL", Const, 0, ""}, + {"MS_MOVE", Const, 0, ""}, + {"MS_NOATIME", Const, 0, ""}, + {"MS_NODEV", Const, 0, ""}, + {"MS_NODIRATIME", Const, 0, ""}, + {"MS_NOEXEC", Const, 0, ""}, + {"MS_NOSUID", Const, 0, ""}, + {"MS_NOUSER", Const, 0, ""}, + {"MS_POSIXACL", Const, 0, ""}, + {"MS_PRIVATE", Const, 0, ""}, + {"MS_RDONLY", Const, 0, ""}, + {"MS_REC", Const, 0, ""}, + {"MS_RELATIME", Const, 0, ""}, + {"MS_REMOUNT", Const, 0, ""}, + {"MS_RMT_MASK", Const, 0, ""}, + {"MS_SHARED", Const, 0, ""}, + {"MS_SILENT", Const, 0, ""}, + {"MS_SLAVE", Const, 0, ""}, + {"MS_STRICTATIME", Const, 0, ""}, + {"MS_SYNC", Const, 0, ""}, + {"MS_SYNCHRONOUS", Const, 0, ""}, + {"MS_UNBINDABLE", Const, 0, ""}, + {"Madvise", Func, 0, "func(b []byte, advice int) (err error)"}, + {"MapViewOfFile", Func, 0, ""}, + {"MaxTokenInfoClass", Const, 0, ""}, + {"Mclpool", Type, 2, ""}, + {"Mclpool.Alive", Field, 2, ""}, + {"Mclpool.Cwm", Field, 2, ""}, + {"Mclpool.Grown", Field, 2, ""}, + {"Mclpool.Hwm", Field, 2, ""}, + {"Mclpool.Lwm", Field, 2, ""}, + {"MibIfRow", Type, 0, ""}, + {"MibIfRow.AdminStatus", Field, 0, ""}, + {"MibIfRow.Descr", Field, 0, ""}, + {"MibIfRow.DescrLen", Field, 0, ""}, + {"MibIfRow.InDiscards", Field, 0, ""}, + {"MibIfRow.InErrors", Field, 0, ""}, + {"MibIfRow.InNUcastPkts", Field, 0, ""}, + {"MibIfRow.InOctets", Field, 0, ""}, + {"MibIfRow.InUcastPkts", Field, 0, ""}, + {"MibIfRow.InUnknownProtos", Field, 0, ""}, + {"MibIfRow.Index", Field, 0, ""}, + {"MibIfRow.LastChange", Field, 0, ""}, + {"MibIfRow.Mtu", Field, 0, ""}, + {"MibIfRow.Name", Field, 0, ""}, + {"MibIfRow.OperStatus", Field, 0, ""}, + {"MibIfRow.OutDiscards", Field, 0, ""}, + {"MibIfRow.OutErrors", Field, 0, ""}, + {"MibIfRow.OutNUcastPkts", Field, 0, ""}, + {"MibIfRow.OutOctets", Field, 0, ""}, + {"MibIfRow.OutQLen", Field, 0, ""}, + {"MibIfRow.OutUcastPkts", Field, 0, ""}, + {"MibIfRow.PhysAddr", Field, 0, ""}, + {"MibIfRow.PhysAddrLen", Field, 0, ""}, + {"MibIfRow.Speed", Field, 0, ""}, + {"MibIfRow.Type", Field, 0, ""}, + {"Mkdir", Func, 0, "func(path string, mode uint32) (err error)"}, + {"Mkdirat", Func, 0, "func(dirfd int, path string, mode uint32) (err error)"}, + {"Mkfifo", Func, 0, "func(path string, mode uint32) (err error)"}, + {"Mknod", Func, 0, "func(path string, mode uint32, dev int) (err error)"}, + {"Mknodat", Func, 0, "func(dirfd int, path string, mode uint32, dev int) (err error)"}, + {"Mlock", Func, 0, "func(b []byte) (err error)"}, + 
{"Mlockall", Func, 0, "func(flags int) (err error)"}, + {"Mmap", Func, 0, "func(fd int, offset int64, length int, prot int, flags int) (data []byte, err error)"}, + {"Mount", Func, 0, "func(source string, target string, fstype string, flags uintptr, data string) (err error)"}, + {"MoveFile", Func, 0, ""}, + {"Mprotect", Func, 0, "func(b []byte, prot int) (err error)"}, + {"Msghdr", Type, 0, ""}, + {"Msghdr.Control", Field, 0, ""}, + {"Msghdr.Controllen", Field, 0, ""}, + {"Msghdr.Flags", Field, 0, ""}, + {"Msghdr.Iov", Field, 0, ""}, + {"Msghdr.Iovlen", Field, 0, ""}, + {"Msghdr.Name", Field, 0, ""}, + {"Msghdr.Namelen", Field, 0, ""}, + {"Msghdr.Pad_cgo_0", Field, 0, ""}, + {"Msghdr.Pad_cgo_1", Field, 0, ""}, + {"Munlock", Func, 0, "func(b []byte) (err error)"}, + {"Munlockall", Func, 0, "func() (err error)"}, + {"Munmap", Func, 0, "func(b []byte) (err error)"}, + {"MustLoadDLL", Func, 0, ""}, + {"NAME_MAX", Const, 0, ""}, + {"NETLINK_ADD_MEMBERSHIP", Const, 0, ""}, + {"NETLINK_AUDIT", Const, 0, ""}, + {"NETLINK_BROADCAST_ERROR", Const, 0, ""}, + {"NETLINK_CONNECTOR", Const, 0, ""}, + {"NETLINK_DNRTMSG", Const, 0, ""}, + {"NETLINK_DROP_MEMBERSHIP", Const, 0, ""}, + {"NETLINK_ECRYPTFS", Const, 0, ""}, + {"NETLINK_FIB_LOOKUP", Const, 0, ""}, + {"NETLINK_FIREWALL", Const, 0, ""}, + {"NETLINK_GENERIC", Const, 0, ""}, + {"NETLINK_INET_DIAG", Const, 0, ""}, + {"NETLINK_IP6_FW", Const, 0, ""}, + {"NETLINK_ISCSI", Const, 0, ""}, + {"NETLINK_KOBJECT_UEVENT", Const, 0, ""}, + {"NETLINK_NETFILTER", Const, 0, ""}, + {"NETLINK_NFLOG", Const, 0, ""}, + {"NETLINK_NO_ENOBUFS", Const, 0, ""}, + {"NETLINK_PKTINFO", Const, 0, ""}, + {"NETLINK_RDMA", Const, 0, ""}, + {"NETLINK_ROUTE", Const, 0, ""}, + {"NETLINK_SCSITRANSPORT", Const, 0, ""}, + {"NETLINK_SELINUX", Const, 0, ""}, + {"NETLINK_UNUSED", Const, 0, ""}, + {"NETLINK_USERSOCK", Const, 0, ""}, + {"NETLINK_XFRM", Const, 0, ""}, + {"NET_RT_DUMP", Const, 0, ""}, + {"NET_RT_DUMP2", Const, 0, ""}, + {"NET_RT_FLAGS", Const, 0, ""}, + {"NET_RT_IFLIST", Const, 0, ""}, + {"NET_RT_IFLIST2", Const, 0, ""}, + {"NET_RT_IFLISTL", Const, 1, ""}, + {"NET_RT_IFMALIST", Const, 0, ""}, + {"NET_RT_MAXID", Const, 0, ""}, + {"NET_RT_OIFLIST", Const, 1, ""}, + {"NET_RT_OOIFLIST", Const, 1, ""}, + {"NET_RT_STAT", Const, 0, ""}, + {"NET_RT_STATS", Const, 1, ""}, + {"NET_RT_TABLE", Const, 1, ""}, + {"NET_RT_TRASH", Const, 0, ""}, + {"NLA_ALIGNTO", Const, 0, ""}, + {"NLA_F_NESTED", Const, 0, ""}, + {"NLA_F_NET_BYTEORDER", Const, 0, ""}, + {"NLA_HDRLEN", Const, 0, ""}, + {"NLMSG_ALIGNTO", Const, 0, ""}, + {"NLMSG_DONE", Const, 0, ""}, + {"NLMSG_ERROR", Const, 0, ""}, + {"NLMSG_HDRLEN", Const, 0, ""}, + {"NLMSG_MIN_TYPE", Const, 0, ""}, + {"NLMSG_NOOP", Const, 0, ""}, + {"NLMSG_OVERRUN", Const, 0, ""}, + {"NLM_F_ACK", Const, 0, ""}, + {"NLM_F_APPEND", Const, 0, ""}, + {"NLM_F_ATOMIC", Const, 0, ""}, + {"NLM_F_CREATE", Const, 0, ""}, + {"NLM_F_DUMP", Const, 0, ""}, + {"NLM_F_ECHO", Const, 0, ""}, + {"NLM_F_EXCL", Const, 0, ""}, + {"NLM_F_MATCH", Const, 0, ""}, + {"NLM_F_MULTI", Const, 0, ""}, + {"NLM_F_REPLACE", Const, 0, ""}, + {"NLM_F_REQUEST", Const, 0, ""}, + {"NLM_F_ROOT", Const, 0, ""}, + {"NOFLSH", Const, 0, ""}, + {"NOTE_ABSOLUTE", Const, 0, ""}, + {"NOTE_ATTRIB", Const, 0, ""}, + {"NOTE_BACKGROUND", Const, 16, ""}, + {"NOTE_CHILD", Const, 0, ""}, + {"NOTE_CRITICAL", Const, 16, ""}, + {"NOTE_DELETE", Const, 0, ""}, + {"NOTE_EOF", Const, 1, ""}, + {"NOTE_EXEC", Const, 0, ""}, + {"NOTE_EXIT", Const, 0, ""}, + {"NOTE_EXITSTATUS", Const, 0, ""}, + {"NOTE_EXIT_CSERROR", Const, 
16, ""}, + {"NOTE_EXIT_DECRYPTFAIL", Const, 16, ""}, + {"NOTE_EXIT_DETAIL", Const, 16, ""}, + {"NOTE_EXIT_DETAIL_MASK", Const, 16, ""}, + {"NOTE_EXIT_MEMORY", Const, 16, ""}, + {"NOTE_EXIT_REPARENTED", Const, 16, ""}, + {"NOTE_EXTEND", Const, 0, ""}, + {"NOTE_FFAND", Const, 0, ""}, + {"NOTE_FFCOPY", Const, 0, ""}, + {"NOTE_FFCTRLMASK", Const, 0, ""}, + {"NOTE_FFLAGSMASK", Const, 0, ""}, + {"NOTE_FFNOP", Const, 0, ""}, + {"NOTE_FFOR", Const, 0, ""}, + {"NOTE_FORK", Const, 0, ""}, + {"NOTE_LEEWAY", Const, 16, ""}, + {"NOTE_LINK", Const, 0, ""}, + {"NOTE_LOWAT", Const, 0, ""}, + {"NOTE_NONE", Const, 0, ""}, + {"NOTE_NSECONDS", Const, 0, ""}, + {"NOTE_PCTRLMASK", Const, 0, ""}, + {"NOTE_PDATAMASK", Const, 0, ""}, + {"NOTE_REAP", Const, 0, ""}, + {"NOTE_RENAME", Const, 0, ""}, + {"NOTE_RESOURCEEND", Const, 0, ""}, + {"NOTE_REVOKE", Const, 0, ""}, + {"NOTE_SECONDS", Const, 0, ""}, + {"NOTE_SIGNAL", Const, 0, ""}, + {"NOTE_TRACK", Const, 0, ""}, + {"NOTE_TRACKERR", Const, 0, ""}, + {"NOTE_TRIGGER", Const, 0, ""}, + {"NOTE_TRUNCATE", Const, 1, ""}, + {"NOTE_USECONDS", Const, 0, ""}, + {"NOTE_VM_ERROR", Const, 0, ""}, + {"NOTE_VM_PRESSURE", Const, 0, ""}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", Const, 0, ""}, + {"NOTE_VM_PRESSURE_TERMINATE", Const, 0, ""}, + {"NOTE_WRITE", Const, 0, ""}, + {"NameCanonical", Const, 0, ""}, + {"NameCanonicalEx", Const, 0, ""}, + {"NameDisplay", Const, 0, ""}, + {"NameDnsDomain", Const, 0, ""}, + {"NameFullyQualifiedDN", Const, 0, ""}, + {"NameSamCompatible", Const, 0, ""}, + {"NameServicePrincipal", Const, 0, ""}, + {"NameUniqueId", Const, 0, ""}, + {"NameUnknown", Const, 0, ""}, + {"NameUserPrincipal", Const, 0, ""}, + {"Nanosleep", Func, 0, "func(time *Timespec, leftover *Timespec) (err error)"}, + {"NetApiBufferFree", Func, 0, ""}, + {"NetGetJoinInformation", Func, 2, ""}, + {"NetSetupDomainName", Const, 2, ""}, + {"NetSetupUnjoined", Const, 2, ""}, + {"NetSetupUnknownStatus", Const, 2, ""}, + {"NetSetupWorkgroupName", Const, 2, ""}, + {"NetUserGetInfo", Func, 0, ""}, + {"NetlinkMessage", Type, 0, ""}, + {"NetlinkMessage.Data", Field, 0, ""}, + {"NetlinkMessage.Header", Field, 0, ""}, + {"NetlinkRIB", Func, 0, "func(proto int, family int) ([]byte, error)"}, + {"NetlinkRouteAttr", Type, 0, ""}, + {"NetlinkRouteAttr.Attr", Field, 0, ""}, + {"NetlinkRouteAttr.Value", Field, 0, ""}, + {"NetlinkRouteRequest", Type, 0, ""}, + {"NetlinkRouteRequest.Data", Field, 0, ""}, + {"NetlinkRouteRequest.Header", Field, 0, ""}, + {"NewCallback", Func, 0, ""}, + {"NewCallbackCDecl", Func, 3, ""}, + {"NewLazyDLL", Func, 0, ""}, + {"NlAttr", Type, 0, ""}, + {"NlAttr.Len", Field, 0, ""}, + {"NlAttr.Type", Field, 0, ""}, + {"NlMsgerr", Type, 0, ""}, + {"NlMsgerr.Error", Field, 0, ""}, + {"NlMsgerr.Msg", Field, 0, ""}, + {"NlMsghdr", Type, 0, ""}, + {"NlMsghdr.Flags", Field, 0, ""}, + {"NlMsghdr.Len", Field, 0, ""}, + {"NlMsghdr.Pid", Field, 0, ""}, + {"NlMsghdr.Seq", Field, 0, ""}, + {"NlMsghdr.Type", Field, 0, ""}, + {"NsecToFiletime", Func, 0, ""}, + {"NsecToTimespec", Func, 0, "func(nsec int64) Timespec"}, + {"NsecToTimeval", Func, 0, "func(nsec int64) Timeval"}, + {"Ntohs", Func, 0, ""}, + {"OCRNL", Const, 0, ""}, + {"OFDEL", Const, 0, ""}, + {"OFILL", Const, 0, ""}, + {"OFIOGETBMAP", Const, 1, ""}, + {"OID_PKIX_KP_SERVER_AUTH", Var, 0, ""}, + {"OID_SERVER_GATED_CRYPTO", Var, 0, ""}, + {"OID_SGC_NETSCAPE", Var, 0, ""}, + {"OLCUC", Const, 0, ""}, + {"ONLCR", Const, 0, ""}, + {"ONLRET", Const, 0, ""}, + {"ONOCR", Const, 0, ""}, + {"ONOEOT", Const, 1, ""}, + {"OPEN_ALWAYS", Const, 0, 
""}, + {"OPEN_EXISTING", Const, 0, ""}, + {"OPOST", Const, 0, ""}, + {"O_ACCMODE", Const, 0, ""}, + {"O_ALERT", Const, 0, ""}, + {"O_ALT_IO", Const, 1, ""}, + {"O_APPEND", Const, 0, ""}, + {"O_ASYNC", Const, 0, ""}, + {"O_CLOEXEC", Const, 0, ""}, + {"O_CREAT", Const, 0, ""}, + {"O_DIRECT", Const, 0, ""}, + {"O_DIRECTORY", Const, 0, ""}, + {"O_DP_GETRAWENCRYPTED", Const, 16, ""}, + {"O_DSYNC", Const, 0, ""}, + {"O_EVTONLY", Const, 0, ""}, + {"O_EXCL", Const, 0, ""}, + {"O_EXEC", Const, 0, ""}, + {"O_EXLOCK", Const, 0, ""}, + {"O_FSYNC", Const, 0, ""}, + {"O_LARGEFILE", Const, 0, ""}, + {"O_NDELAY", Const, 0, ""}, + {"O_NOATIME", Const, 0, ""}, + {"O_NOCTTY", Const, 0, ""}, + {"O_NOFOLLOW", Const, 0, ""}, + {"O_NONBLOCK", Const, 0, ""}, + {"O_NOSIGPIPE", Const, 1, ""}, + {"O_POPUP", Const, 0, ""}, + {"O_RDONLY", Const, 0, ""}, + {"O_RDWR", Const, 0, ""}, + {"O_RSYNC", Const, 0, ""}, + {"O_SHLOCK", Const, 0, ""}, + {"O_SYMLINK", Const, 0, ""}, + {"O_SYNC", Const, 0, ""}, + {"O_TRUNC", Const, 0, ""}, + {"O_TTY_INIT", Const, 0, ""}, + {"O_WRONLY", Const, 0, ""}, + {"Open", Func, 0, "func(path string, mode int, perm uint32) (fd int, err error)"}, + {"OpenCurrentProcessToken", Func, 0, ""}, + {"OpenProcess", Func, 0, ""}, + {"OpenProcessToken", Func, 0, ""}, + {"Openat", Func, 0, "func(dirfd int, path string, flags int, mode uint32) (fd int, err error)"}, + {"Overlapped", Type, 0, ""}, + {"Overlapped.HEvent", Field, 0, ""}, + {"Overlapped.Internal", Field, 0, ""}, + {"Overlapped.InternalHigh", Field, 0, ""}, + {"Overlapped.Offset", Field, 0, ""}, + {"Overlapped.OffsetHigh", Field, 0, ""}, + {"PACKET_ADD_MEMBERSHIP", Const, 0, ""}, + {"PACKET_BROADCAST", Const, 0, ""}, + {"PACKET_DROP_MEMBERSHIP", Const, 0, ""}, + {"PACKET_FASTROUTE", Const, 0, ""}, + {"PACKET_HOST", Const, 0, ""}, + {"PACKET_LOOPBACK", Const, 0, ""}, + {"PACKET_MR_ALLMULTI", Const, 0, ""}, + {"PACKET_MR_MULTICAST", Const, 0, ""}, + {"PACKET_MR_PROMISC", Const, 0, ""}, + {"PACKET_MULTICAST", Const, 0, ""}, + {"PACKET_OTHERHOST", Const, 0, ""}, + {"PACKET_OUTGOING", Const, 0, ""}, + {"PACKET_RECV_OUTPUT", Const, 0, ""}, + {"PACKET_RX_RING", Const, 0, ""}, + {"PACKET_STATISTICS", Const, 0, ""}, + {"PAGE_EXECUTE_READ", Const, 0, ""}, + {"PAGE_EXECUTE_READWRITE", Const, 0, ""}, + {"PAGE_EXECUTE_WRITECOPY", Const, 0, ""}, + {"PAGE_READONLY", Const, 0, ""}, + {"PAGE_READWRITE", Const, 0, ""}, + {"PAGE_WRITECOPY", Const, 0, ""}, + {"PARENB", Const, 0, ""}, + {"PARMRK", Const, 0, ""}, + {"PARODD", Const, 0, ""}, + {"PENDIN", Const, 0, ""}, + {"PFL_HIDDEN", Const, 2, ""}, + {"PFL_MATCHES_PROTOCOL_ZERO", Const, 2, ""}, + {"PFL_MULTIPLE_PROTO_ENTRIES", Const, 2, ""}, + {"PFL_NETWORKDIRECT_PROVIDER", Const, 2, ""}, + {"PFL_RECOMMENDED_PROTO_ENTRY", Const, 2, ""}, + {"PF_FLUSH", Const, 1, ""}, + {"PKCS_7_ASN_ENCODING", Const, 0, ""}, + {"PMC5_PIPELINE_FLUSH", Const, 1, ""}, + {"PRIO_PGRP", Const, 2, ""}, + {"PRIO_PROCESS", Const, 2, ""}, + {"PRIO_USER", Const, 2, ""}, + {"PRI_IOFLUSH", Const, 1, ""}, + {"PROCESS_QUERY_INFORMATION", Const, 0, ""}, + {"PROCESS_TERMINATE", Const, 2, ""}, + {"PROT_EXEC", Const, 0, ""}, + {"PROT_GROWSDOWN", Const, 0, ""}, + {"PROT_GROWSUP", Const, 0, ""}, + {"PROT_NONE", Const, 0, ""}, + {"PROT_READ", Const, 0, ""}, + {"PROT_WRITE", Const, 0, ""}, + {"PROV_DH_SCHANNEL", Const, 0, ""}, + {"PROV_DSS", Const, 0, ""}, + {"PROV_DSS_DH", Const, 0, ""}, + {"PROV_EC_ECDSA_FULL", Const, 0, ""}, + {"PROV_EC_ECDSA_SIG", Const, 0, ""}, + {"PROV_EC_ECNRA_FULL", Const, 0, ""}, + {"PROV_EC_ECNRA_SIG", Const, 0, ""}, + 
{"PROV_FORTEZZA", Const, 0, ""}, + {"PROV_INTEL_SEC", Const, 0, ""}, + {"PROV_MS_EXCHANGE", Const, 0, ""}, + {"PROV_REPLACE_OWF", Const, 0, ""}, + {"PROV_RNG", Const, 0, ""}, + {"PROV_RSA_AES", Const, 0, ""}, + {"PROV_RSA_FULL", Const, 0, ""}, + {"PROV_RSA_SCHANNEL", Const, 0, ""}, + {"PROV_RSA_SIG", Const, 0, ""}, + {"PROV_SPYRUS_LYNKS", Const, 0, ""}, + {"PROV_SSL", Const, 0, ""}, + {"PR_CAPBSET_DROP", Const, 0, ""}, + {"PR_CAPBSET_READ", Const, 0, ""}, + {"PR_CLEAR_SECCOMP_FILTER", Const, 0, ""}, + {"PR_ENDIAN_BIG", Const, 0, ""}, + {"PR_ENDIAN_LITTLE", Const, 0, ""}, + {"PR_ENDIAN_PPC_LITTLE", Const, 0, ""}, + {"PR_FPEMU_NOPRINT", Const, 0, ""}, + {"PR_FPEMU_SIGFPE", Const, 0, ""}, + {"PR_FP_EXC_ASYNC", Const, 0, ""}, + {"PR_FP_EXC_DISABLED", Const, 0, ""}, + {"PR_FP_EXC_DIV", Const, 0, ""}, + {"PR_FP_EXC_INV", Const, 0, ""}, + {"PR_FP_EXC_NONRECOV", Const, 0, ""}, + {"PR_FP_EXC_OVF", Const, 0, ""}, + {"PR_FP_EXC_PRECISE", Const, 0, ""}, + {"PR_FP_EXC_RES", Const, 0, ""}, + {"PR_FP_EXC_SW_ENABLE", Const, 0, ""}, + {"PR_FP_EXC_UND", Const, 0, ""}, + {"PR_GET_DUMPABLE", Const, 0, ""}, + {"PR_GET_ENDIAN", Const, 0, ""}, + {"PR_GET_FPEMU", Const, 0, ""}, + {"PR_GET_FPEXC", Const, 0, ""}, + {"PR_GET_KEEPCAPS", Const, 0, ""}, + {"PR_GET_NAME", Const, 0, ""}, + {"PR_GET_PDEATHSIG", Const, 0, ""}, + {"PR_GET_SECCOMP", Const, 0, ""}, + {"PR_GET_SECCOMP_FILTER", Const, 0, ""}, + {"PR_GET_SECUREBITS", Const, 0, ""}, + {"PR_GET_TIMERSLACK", Const, 0, ""}, + {"PR_GET_TIMING", Const, 0, ""}, + {"PR_GET_TSC", Const, 0, ""}, + {"PR_GET_UNALIGN", Const, 0, ""}, + {"PR_MCE_KILL", Const, 0, ""}, + {"PR_MCE_KILL_CLEAR", Const, 0, ""}, + {"PR_MCE_KILL_DEFAULT", Const, 0, ""}, + {"PR_MCE_KILL_EARLY", Const, 0, ""}, + {"PR_MCE_KILL_GET", Const, 0, ""}, + {"PR_MCE_KILL_LATE", Const, 0, ""}, + {"PR_MCE_KILL_SET", Const, 0, ""}, + {"PR_SECCOMP_FILTER_EVENT", Const, 0, ""}, + {"PR_SECCOMP_FILTER_SYSCALL", Const, 0, ""}, + {"PR_SET_DUMPABLE", Const, 0, ""}, + {"PR_SET_ENDIAN", Const, 0, ""}, + {"PR_SET_FPEMU", Const, 0, ""}, + {"PR_SET_FPEXC", Const, 0, ""}, + {"PR_SET_KEEPCAPS", Const, 0, ""}, + {"PR_SET_NAME", Const, 0, ""}, + {"PR_SET_PDEATHSIG", Const, 0, ""}, + {"PR_SET_PTRACER", Const, 0, ""}, + {"PR_SET_SECCOMP", Const, 0, ""}, + {"PR_SET_SECCOMP_FILTER", Const, 0, ""}, + {"PR_SET_SECUREBITS", Const, 0, ""}, + {"PR_SET_TIMERSLACK", Const, 0, ""}, + {"PR_SET_TIMING", Const, 0, ""}, + {"PR_SET_TSC", Const, 0, ""}, + {"PR_SET_UNALIGN", Const, 0, ""}, + {"PR_TASK_PERF_EVENTS_DISABLE", Const, 0, ""}, + {"PR_TASK_PERF_EVENTS_ENABLE", Const, 0, ""}, + {"PR_TIMING_STATISTICAL", Const, 0, ""}, + {"PR_TIMING_TIMESTAMP", Const, 0, ""}, + {"PR_TSC_ENABLE", Const, 0, ""}, + {"PR_TSC_SIGSEGV", Const, 0, ""}, + {"PR_UNALIGN_NOPRINT", Const, 0, ""}, + {"PR_UNALIGN_SIGBUS", Const, 0, ""}, + {"PTRACE_ARCH_PRCTL", Const, 0, ""}, + {"PTRACE_ATTACH", Const, 0, ""}, + {"PTRACE_CONT", Const, 0, ""}, + {"PTRACE_DETACH", Const, 0, ""}, + {"PTRACE_EVENT_CLONE", Const, 0, ""}, + {"PTRACE_EVENT_EXEC", Const, 0, ""}, + {"PTRACE_EVENT_EXIT", Const, 0, ""}, + {"PTRACE_EVENT_FORK", Const, 0, ""}, + {"PTRACE_EVENT_VFORK", Const, 0, ""}, + {"PTRACE_EVENT_VFORK_DONE", Const, 0, ""}, + {"PTRACE_GETCRUNCHREGS", Const, 0, ""}, + {"PTRACE_GETEVENTMSG", Const, 0, ""}, + {"PTRACE_GETFPREGS", Const, 0, ""}, + {"PTRACE_GETFPXREGS", Const, 0, ""}, + {"PTRACE_GETHBPREGS", Const, 0, ""}, + {"PTRACE_GETREGS", Const, 0, ""}, + {"PTRACE_GETREGSET", Const, 0, ""}, + {"PTRACE_GETSIGINFO", Const, 0, ""}, + {"PTRACE_GETVFPREGS", Const, 0, ""}, + 
{"PTRACE_GETWMMXREGS", Const, 0, ""}, + {"PTRACE_GET_THREAD_AREA", Const, 0, ""}, + {"PTRACE_KILL", Const, 0, ""}, + {"PTRACE_OLDSETOPTIONS", Const, 0, ""}, + {"PTRACE_O_MASK", Const, 0, ""}, + {"PTRACE_O_TRACECLONE", Const, 0, ""}, + {"PTRACE_O_TRACEEXEC", Const, 0, ""}, + {"PTRACE_O_TRACEEXIT", Const, 0, ""}, + {"PTRACE_O_TRACEFORK", Const, 0, ""}, + {"PTRACE_O_TRACESYSGOOD", Const, 0, ""}, + {"PTRACE_O_TRACEVFORK", Const, 0, ""}, + {"PTRACE_O_TRACEVFORKDONE", Const, 0, ""}, + {"PTRACE_PEEKDATA", Const, 0, ""}, + {"PTRACE_PEEKTEXT", Const, 0, ""}, + {"PTRACE_PEEKUSR", Const, 0, ""}, + {"PTRACE_POKEDATA", Const, 0, ""}, + {"PTRACE_POKETEXT", Const, 0, ""}, + {"PTRACE_POKEUSR", Const, 0, ""}, + {"PTRACE_SETCRUNCHREGS", Const, 0, ""}, + {"PTRACE_SETFPREGS", Const, 0, ""}, + {"PTRACE_SETFPXREGS", Const, 0, ""}, + {"PTRACE_SETHBPREGS", Const, 0, ""}, + {"PTRACE_SETOPTIONS", Const, 0, ""}, + {"PTRACE_SETREGS", Const, 0, ""}, + {"PTRACE_SETREGSET", Const, 0, ""}, + {"PTRACE_SETSIGINFO", Const, 0, ""}, + {"PTRACE_SETVFPREGS", Const, 0, ""}, + {"PTRACE_SETWMMXREGS", Const, 0, ""}, + {"PTRACE_SET_SYSCALL", Const, 0, ""}, + {"PTRACE_SET_THREAD_AREA", Const, 0, ""}, + {"PTRACE_SINGLEBLOCK", Const, 0, ""}, + {"PTRACE_SINGLESTEP", Const, 0, ""}, + {"PTRACE_SYSCALL", Const, 0, ""}, + {"PTRACE_SYSEMU", Const, 0, ""}, + {"PTRACE_SYSEMU_SINGLESTEP", Const, 0, ""}, + {"PTRACE_TRACEME", Const, 0, ""}, + {"PT_ATTACH", Const, 0, ""}, + {"PT_ATTACHEXC", Const, 0, ""}, + {"PT_CONTINUE", Const, 0, ""}, + {"PT_DATA_ADDR", Const, 0, ""}, + {"PT_DENY_ATTACH", Const, 0, ""}, + {"PT_DETACH", Const, 0, ""}, + {"PT_FIRSTMACH", Const, 0, ""}, + {"PT_FORCEQUOTA", Const, 0, ""}, + {"PT_KILL", Const, 0, ""}, + {"PT_MASK", Const, 1, ""}, + {"PT_READ_D", Const, 0, ""}, + {"PT_READ_I", Const, 0, ""}, + {"PT_READ_U", Const, 0, ""}, + {"PT_SIGEXC", Const, 0, ""}, + {"PT_STEP", Const, 0, ""}, + {"PT_TEXT_ADDR", Const, 0, ""}, + {"PT_TEXT_END_ADDR", Const, 0, ""}, + {"PT_THUPDATE", Const, 0, ""}, + {"PT_TRACE_ME", Const, 0, ""}, + {"PT_WRITE_D", Const, 0, ""}, + {"PT_WRITE_I", Const, 0, ""}, + {"PT_WRITE_U", Const, 0, ""}, + {"ParseDirent", Func, 0, "func(buf []byte, max int, names []string) (consumed int, count int, newnames []string)"}, + {"ParseNetlinkMessage", Func, 0, "func(b []byte) ([]NetlinkMessage, error)"}, + {"ParseNetlinkRouteAttr", Func, 0, "func(m *NetlinkMessage) ([]NetlinkRouteAttr, error)"}, + {"ParseRoutingMessage", Func, 0, ""}, + {"ParseRoutingSockaddr", Func, 0, ""}, + {"ParseSocketControlMessage", Func, 0, "func(b []byte) ([]SocketControlMessage, error)"}, + {"ParseUnixCredentials", Func, 0, "func(m *SocketControlMessage) (*Ucred, error)"}, + {"ParseUnixRights", Func, 0, "func(m *SocketControlMessage) ([]int, error)"}, + {"PathMax", Const, 0, ""}, + {"Pathconf", Func, 0, ""}, + {"Pause", Func, 0, "func() (err error)"}, + {"Pipe", Func, 0, "func(p []int) error"}, + {"Pipe2", Func, 1, "func(p []int, flags int) error"}, + {"PivotRoot", Func, 0, "func(newroot string, putold string) (err error)"}, + {"Pointer", Type, 11, ""}, + {"PostQueuedCompletionStatus", Func, 0, ""}, + {"Pread", Func, 0, "func(fd int, p []byte, offset int64) (n int, err error)"}, + {"Proc", Type, 0, ""}, + {"Proc.Dll", Field, 0, ""}, + {"Proc.Name", Field, 0, ""}, + {"ProcAttr", Type, 0, ""}, + {"ProcAttr.Dir", Field, 0, ""}, + {"ProcAttr.Env", Field, 0, ""}, + {"ProcAttr.Files", Field, 0, ""}, + {"ProcAttr.Sys", Field, 0, ""}, + {"Process32First", Func, 4, ""}, + {"Process32Next", Func, 4, ""}, + {"ProcessEntry32", Type, 4, ""}, + 
{"ProcessEntry32.DefaultHeapID", Field, 4, ""}, + {"ProcessEntry32.ExeFile", Field, 4, ""}, + {"ProcessEntry32.Flags", Field, 4, ""}, + {"ProcessEntry32.ModuleID", Field, 4, ""}, + {"ProcessEntry32.ParentProcessID", Field, 4, ""}, + {"ProcessEntry32.PriClassBase", Field, 4, ""}, + {"ProcessEntry32.ProcessID", Field, 4, ""}, + {"ProcessEntry32.Size", Field, 4, ""}, + {"ProcessEntry32.Threads", Field, 4, ""}, + {"ProcessEntry32.Usage", Field, 4, ""}, + {"ProcessInformation", Type, 0, ""}, + {"ProcessInformation.Process", Field, 0, ""}, + {"ProcessInformation.ProcessId", Field, 0, ""}, + {"ProcessInformation.Thread", Field, 0, ""}, + {"ProcessInformation.ThreadId", Field, 0, ""}, + {"Protoent", Type, 0, ""}, + {"Protoent.Aliases", Field, 0, ""}, + {"Protoent.Name", Field, 0, ""}, + {"Protoent.Proto", Field, 0, ""}, + {"PtraceAttach", Func, 0, "func(pid int) (err error)"}, + {"PtraceCont", Func, 0, "func(pid int, signal int) (err error)"}, + {"PtraceDetach", Func, 0, "func(pid int) (err error)"}, + {"PtraceGetEventMsg", Func, 0, "func(pid int) (msg uint, err error)"}, + {"PtraceGetRegs", Func, 0, "func(pid int, regsout *PtraceRegs) (err error)"}, + {"PtracePeekData", Func, 0, "func(pid int, addr uintptr, out []byte) (count int, err error)"}, + {"PtracePeekText", Func, 0, "func(pid int, addr uintptr, out []byte) (count int, err error)"}, + {"PtracePokeData", Func, 0, "func(pid int, addr uintptr, data []byte) (count int, err error)"}, + {"PtracePokeText", Func, 0, "func(pid int, addr uintptr, data []byte) (count int, err error)"}, + {"PtraceRegs", Type, 0, ""}, + {"PtraceRegs.Cs", Field, 0, ""}, + {"PtraceRegs.Ds", Field, 0, ""}, + {"PtraceRegs.Eax", Field, 0, ""}, + {"PtraceRegs.Ebp", Field, 0, ""}, + {"PtraceRegs.Ebx", Field, 0, ""}, + {"PtraceRegs.Ecx", Field, 0, ""}, + {"PtraceRegs.Edi", Field, 0, ""}, + {"PtraceRegs.Edx", Field, 0, ""}, + {"PtraceRegs.Eflags", Field, 0, ""}, + {"PtraceRegs.Eip", Field, 0, ""}, + {"PtraceRegs.Es", Field, 0, ""}, + {"PtraceRegs.Esi", Field, 0, ""}, + {"PtraceRegs.Esp", Field, 0, ""}, + {"PtraceRegs.Fs", Field, 0, ""}, + {"PtraceRegs.Fs_base", Field, 0, ""}, + {"PtraceRegs.Gs", Field, 0, ""}, + {"PtraceRegs.Gs_base", Field, 0, ""}, + {"PtraceRegs.Orig_eax", Field, 0, ""}, + {"PtraceRegs.Orig_rax", Field, 0, ""}, + {"PtraceRegs.R10", Field, 0, ""}, + {"PtraceRegs.R11", Field, 0, ""}, + {"PtraceRegs.R12", Field, 0, ""}, + {"PtraceRegs.R13", Field, 0, ""}, + {"PtraceRegs.R14", Field, 0, ""}, + {"PtraceRegs.R15", Field, 0, ""}, + {"PtraceRegs.R8", Field, 0, ""}, + {"PtraceRegs.R9", Field, 0, ""}, + {"PtraceRegs.Rax", Field, 0, ""}, + {"PtraceRegs.Rbp", Field, 0, ""}, + {"PtraceRegs.Rbx", Field, 0, ""}, + {"PtraceRegs.Rcx", Field, 0, ""}, + {"PtraceRegs.Rdi", Field, 0, ""}, + {"PtraceRegs.Rdx", Field, 0, ""}, + {"PtraceRegs.Rip", Field, 0, ""}, + {"PtraceRegs.Rsi", Field, 0, ""}, + {"PtraceRegs.Rsp", Field, 0, ""}, + {"PtraceRegs.Ss", Field, 0, ""}, + {"PtraceRegs.Uregs", Field, 0, ""}, + {"PtraceRegs.Xcs", Field, 0, ""}, + {"PtraceRegs.Xds", Field, 0, ""}, + {"PtraceRegs.Xes", Field, 0, ""}, + {"PtraceRegs.Xfs", Field, 0, ""}, + {"PtraceRegs.Xgs", Field, 0, ""}, + {"PtraceRegs.Xss", Field, 0, ""}, + {"PtraceSetOptions", Func, 0, "func(pid int, options int) (err error)"}, + {"PtraceSetRegs", Func, 0, "func(pid int, regs *PtraceRegs) (err error)"}, + {"PtraceSingleStep", Func, 0, "func(pid int) (err error)"}, + {"PtraceSyscall", Func, 1, "func(pid int, signal int) (err error)"}, + {"Pwrite", Func, 0, "func(fd int, p []byte, offset int64) (n int, err error)"}, + 
{"REG_BINARY", Const, 0, ""}, + {"REG_DWORD", Const, 0, ""}, + {"REG_DWORD_BIG_ENDIAN", Const, 0, ""}, + {"REG_DWORD_LITTLE_ENDIAN", Const, 0, ""}, + {"REG_EXPAND_SZ", Const, 0, ""}, + {"REG_FULL_RESOURCE_DESCRIPTOR", Const, 0, ""}, + {"REG_LINK", Const, 0, ""}, + {"REG_MULTI_SZ", Const, 0, ""}, + {"REG_NONE", Const, 0, ""}, + {"REG_QWORD", Const, 0, ""}, + {"REG_QWORD_LITTLE_ENDIAN", Const, 0, ""}, + {"REG_RESOURCE_LIST", Const, 0, ""}, + {"REG_RESOURCE_REQUIREMENTS_LIST", Const, 0, ""}, + {"REG_SZ", Const, 0, ""}, + {"RLIMIT_AS", Const, 0, ""}, + {"RLIMIT_CORE", Const, 0, ""}, + {"RLIMIT_CPU", Const, 0, ""}, + {"RLIMIT_CPU_USAGE_MONITOR", Const, 16, ""}, + {"RLIMIT_DATA", Const, 0, ""}, + {"RLIMIT_FSIZE", Const, 0, ""}, + {"RLIMIT_NOFILE", Const, 0, ""}, + {"RLIMIT_STACK", Const, 0, ""}, + {"RLIM_INFINITY", Const, 0, ""}, + {"RTAX_ADVMSS", Const, 0, ""}, + {"RTAX_AUTHOR", Const, 0, ""}, + {"RTAX_BRD", Const, 0, ""}, + {"RTAX_CWND", Const, 0, ""}, + {"RTAX_DST", Const, 0, ""}, + {"RTAX_FEATURES", Const, 0, ""}, + {"RTAX_FEATURE_ALLFRAG", Const, 0, ""}, + {"RTAX_FEATURE_ECN", Const, 0, ""}, + {"RTAX_FEATURE_SACK", Const, 0, ""}, + {"RTAX_FEATURE_TIMESTAMP", Const, 0, ""}, + {"RTAX_GATEWAY", Const, 0, ""}, + {"RTAX_GENMASK", Const, 0, ""}, + {"RTAX_HOPLIMIT", Const, 0, ""}, + {"RTAX_IFA", Const, 0, ""}, + {"RTAX_IFP", Const, 0, ""}, + {"RTAX_INITCWND", Const, 0, ""}, + {"RTAX_INITRWND", Const, 0, ""}, + {"RTAX_LABEL", Const, 1, ""}, + {"RTAX_LOCK", Const, 0, ""}, + {"RTAX_MAX", Const, 0, ""}, + {"RTAX_MTU", Const, 0, ""}, + {"RTAX_NETMASK", Const, 0, ""}, + {"RTAX_REORDERING", Const, 0, ""}, + {"RTAX_RTO_MIN", Const, 0, ""}, + {"RTAX_RTT", Const, 0, ""}, + {"RTAX_RTTVAR", Const, 0, ""}, + {"RTAX_SRC", Const, 1, ""}, + {"RTAX_SRCMASK", Const, 1, ""}, + {"RTAX_SSTHRESH", Const, 0, ""}, + {"RTAX_TAG", Const, 1, ""}, + {"RTAX_UNSPEC", Const, 0, ""}, + {"RTAX_WINDOW", Const, 0, ""}, + {"RTA_ALIGNTO", Const, 0, ""}, + {"RTA_AUTHOR", Const, 0, ""}, + {"RTA_BRD", Const, 0, ""}, + {"RTA_CACHEINFO", Const, 0, ""}, + {"RTA_DST", Const, 0, ""}, + {"RTA_FLOW", Const, 0, ""}, + {"RTA_GATEWAY", Const, 0, ""}, + {"RTA_GENMASK", Const, 0, ""}, + {"RTA_IFA", Const, 0, ""}, + {"RTA_IFP", Const, 0, ""}, + {"RTA_IIF", Const, 0, ""}, + {"RTA_LABEL", Const, 1, ""}, + {"RTA_MAX", Const, 0, ""}, + {"RTA_METRICS", Const, 0, ""}, + {"RTA_MULTIPATH", Const, 0, ""}, + {"RTA_NETMASK", Const, 0, ""}, + {"RTA_OIF", Const, 0, ""}, + {"RTA_PREFSRC", Const, 0, ""}, + {"RTA_PRIORITY", Const, 0, ""}, + {"RTA_SRC", Const, 0, ""}, + {"RTA_SRCMASK", Const, 1, ""}, + {"RTA_TABLE", Const, 0, ""}, + {"RTA_TAG", Const, 1, ""}, + {"RTA_UNSPEC", Const, 0, ""}, + {"RTCF_DIRECTSRC", Const, 0, ""}, + {"RTCF_DOREDIRECT", Const, 0, ""}, + {"RTCF_LOG", Const, 0, ""}, + {"RTCF_MASQ", Const, 0, ""}, + {"RTCF_NAT", Const, 0, ""}, + {"RTCF_VALVE", Const, 0, ""}, + {"RTF_ADDRCLASSMASK", Const, 0, ""}, + {"RTF_ADDRCONF", Const, 0, ""}, + {"RTF_ALLONLINK", Const, 0, ""}, + {"RTF_ANNOUNCE", Const, 1, ""}, + {"RTF_BLACKHOLE", Const, 0, ""}, + {"RTF_BROADCAST", Const, 0, ""}, + {"RTF_CACHE", Const, 0, ""}, + {"RTF_CLONED", Const, 1, ""}, + {"RTF_CLONING", Const, 0, ""}, + {"RTF_CONDEMNED", Const, 0, ""}, + {"RTF_DEFAULT", Const, 0, ""}, + {"RTF_DELCLONE", Const, 0, ""}, + {"RTF_DONE", Const, 0, ""}, + {"RTF_DYNAMIC", Const, 0, ""}, + {"RTF_FLOW", Const, 0, ""}, + {"RTF_FMASK", Const, 0, ""}, + {"RTF_GATEWAY", Const, 0, ""}, + {"RTF_GWFLAG_COMPAT", Const, 3, ""}, + {"RTF_HOST", Const, 0, ""}, + {"RTF_IFREF", Const, 0, ""}, + {"RTF_IFSCOPE", Const, 0, 
""}, + {"RTF_INTERFACE", Const, 0, ""}, + {"RTF_IRTT", Const, 0, ""}, + {"RTF_LINKRT", Const, 0, ""}, + {"RTF_LLDATA", Const, 0, ""}, + {"RTF_LLINFO", Const, 0, ""}, + {"RTF_LOCAL", Const, 0, ""}, + {"RTF_MASK", Const, 1, ""}, + {"RTF_MODIFIED", Const, 0, ""}, + {"RTF_MPATH", Const, 1, ""}, + {"RTF_MPLS", Const, 1, ""}, + {"RTF_MSS", Const, 0, ""}, + {"RTF_MTU", Const, 0, ""}, + {"RTF_MULTICAST", Const, 0, ""}, + {"RTF_NAT", Const, 0, ""}, + {"RTF_NOFORWARD", Const, 0, ""}, + {"RTF_NONEXTHOP", Const, 0, ""}, + {"RTF_NOPMTUDISC", Const, 0, ""}, + {"RTF_PERMANENT_ARP", Const, 1, ""}, + {"RTF_PINNED", Const, 0, ""}, + {"RTF_POLICY", Const, 0, ""}, + {"RTF_PRCLONING", Const, 0, ""}, + {"RTF_PROTO1", Const, 0, ""}, + {"RTF_PROTO2", Const, 0, ""}, + {"RTF_PROTO3", Const, 0, ""}, + {"RTF_PROXY", Const, 16, ""}, + {"RTF_REINSTATE", Const, 0, ""}, + {"RTF_REJECT", Const, 0, ""}, + {"RTF_RNH_LOCKED", Const, 0, ""}, + {"RTF_ROUTER", Const, 16, ""}, + {"RTF_SOURCE", Const, 1, ""}, + {"RTF_SRC", Const, 1, ""}, + {"RTF_STATIC", Const, 0, ""}, + {"RTF_STICKY", Const, 0, ""}, + {"RTF_THROW", Const, 0, ""}, + {"RTF_TUNNEL", Const, 1, ""}, + {"RTF_UP", Const, 0, ""}, + {"RTF_USETRAILERS", Const, 1, ""}, + {"RTF_WASCLONED", Const, 0, ""}, + {"RTF_WINDOW", Const, 0, ""}, + {"RTF_XRESOLVE", Const, 0, ""}, + {"RTM_ADD", Const, 0, ""}, + {"RTM_BASE", Const, 0, ""}, + {"RTM_CHANGE", Const, 0, ""}, + {"RTM_CHGADDR", Const, 1, ""}, + {"RTM_DELACTION", Const, 0, ""}, + {"RTM_DELADDR", Const, 0, ""}, + {"RTM_DELADDRLABEL", Const, 0, ""}, + {"RTM_DELETE", Const, 0, ""}, + {"RTM_DELLINK", Const, 0, ""}, + {"RTM_DELMADDR", Const, 0, ""}, + {"RTM_DELNEIGH", Const, 0, ""}, + {"RTM_DELQDISC", Const, 0, ""}, + {"RTM_DELROUTE", Const, 0, ""}, + {"RTM_DELRULE", Const, 0, ""}, + {"RTM_DELTCLASS", Const, 0, ""}, + {"RTM_DELTFILTER", Const, 0, ""}, + {"RTM_DESYNC", Const, 1, ""}, + {"RTM_F_CLONED", Const, 0, ""}, + {"RTM_F_EQUALIZE", Const, 0, ""}, + {"RTM_F_NOTIFY", Const, 0, ""}, + {"RTM_F_PREFIX", Const, 0, ""}, + {"RTM_GET", Const, 0, ""}, + {"RTM_GET2", Const, 0, ""}, + {"RTM_GETACTION", Const, 0, ""}, + {"RTM_GETADDR", Const, 0, ""}, + {"RTM_GETADDRLABEL", Const, 0, ""}, + {"RTM_GETANYCAST", Const, 0, ""}, + {"RTM_GETDCB", Const, 0, ""}, + {"RTM_GETLINK", Const, 0, ""}, + {"RTM_GETMULTICAST", Const, 0, ""}, + {"RTM_GETNEIGH", Const, 0, ""}, + {"RTM_GETNEIGHTBL", Const, 0, ""}, + {"RTM_GETQDISC", Const, 0, ""}, + {"RTM_GETROUTE", Const, 0, ""}, + {"RTM_GETRULE", Const, 0, ""}, + {"RTM_GETTCLASS", Const, 0, ""}, + {"RTM_GETTFILTER", Const, 0, ""}, + {"RTM_IEEE80211", Const, 0, ""}, + {"RTM_IFANNOUNCE", Const, 0, ""}, + {"RTM_IFINFO", Const, 0, ""}, + {"RTM_IFINFO2", Const, 0, ""}, + {"RTM_LLINFO_UPD", Const, 1, ""}, + {"RTM_LOCK", Const, 0, ""}, + {"RTM_LOSING", Const, 0, ""}, + {"RTM_MAX", Const, 0, ""}, + {"RTM_MAXSIZE", Const, 1, ""}, + {"RTM_MISS", Const, 0, ""}, + {"RTM_NEWACTION", Const, 0, ""}, + {"RTM_NEWADDR", Const, 0, ""}, + {"RTM_NEWADDRLABEL", Const, 0, ""}, + {"RTM_NEWLINK", Const, 0, ""}, + {"RTM_NEWMADDR", Const, 0, ""}, + {"RTM_NEWMADDR2", Const, 0, ""}, + {"RTM_NEWNDUSEROPT", Const, 0, ""}, + {"RTM_NEWNEIGH", Const, 0, ""}, + {"RTM_NEWNEIGHTBL", Const, 0, ""}, + {"RTM_NEWPREFIX", Const, 0, ""}, + {"RTM_NEWQDISC", Const, 0, ""}, + {"RTM_NEWROUTE", Const, 0, ""}, + {"RTM_NEWRULE", Const, 0, ""}, + {"RTM_NEWTCLASS", Const, 0, ""}, + {"RTM_NEWTFILTER", Const, 0, ""}, + {"RTM_NR_FAMILIES", Const, 0, ""}, + {"RTM_NR_MSGTYPES", Const, 0, ""}, + {"RTM_OIFINFO", Const, 1, ""}, + {"RTM_OLDADD", Const, 0, ""}, + 
{"RTM_OLDDEL", Const, 0, ""}, + {"RTM_OOIFINFO", Const, 1, ""}, + {"RTM_REDIRECT", Const, 0, ""}, + {"RTM_RESOLVE", Const, 0, ""}, + {"RTM_RTTUNIT", Const, 0, ""}, + {"RTM_SETDCB", Const, 0, ""}, + {"RTM_SETGATE", Const, 1, ""}, + {"RTM_SETLINK", Const, 0, ""}, + {"RTM_SETNEIGHTBL", Const, 0, ""}, + {"RTM_VERSION", Const, 0, ""}, + {"RTNH_ALIGNTO", Const, 0, ""}, + {"RTNH_F_DEAD", Const, 0, ""}, + {"RTNH_F_ONLINK", Const, 0, ""}, + {"RTNH_F_PERVASIVE", Const, 0, ""}, + {"RTNLGRP_IPV4_IFADDR", Const, 1, ""}, + {"RTNLGRP_IPV4_MROUTE", Const, 1, ""}, + {"RTNLGRP_IPV4_ROUTE", Const, 1, ""}, + {"RTNLGRP_IPV4_RULE", Const, 1, ""}, + {"RTNLGRP_IPV6_IFADDR", Const, 1, ""}, + {"RTNLGRP_IPV6_IFINFO", Const, 1, ""}, + {"RTNLGRP_IPV6_MROUTE", Const, 1, ""}, + {"RTNLGRP_IPV6_PREFIX", Const, 1, ""}, + {"RTNLGRP_IPV6_ROUTE", Const, 1, ""}, + {"RTNLGRP_IPV6_RULE", Const, 1, ""}, + {"RTNLGRP_LINK", Const, 1, ""}, + {"RTNLGRP_ND_USEROPT", Const, 1, ""}, + {"RTNLGRP_NEIGH", Const, 1, ""}, + {"RTNLGRP_NONE", Const, 1, ""}, + {"RTNLGRP_NOTIFY", Const, 1, ""}, + {"RTNLGRP_TC", Const, 1, ""}, + {"RTN_ANYCAST", Const, 0, ""}, + {"RTN_BLACKHOLE", Const, 0, ""}, + {"RTN_BROADCAST", Const, 0, ""}, + {"RTN_LOCAL", Const, 0, ""}, + {"RTN_MAX", Const, 0, ""}, + {"RTN_MULTICAST", Const, 0, ""}, + {"RTN_NAT", Const, 0, ""}, + {"RTN_PROHIBIT", Const, 0, ""}, + {"RTN_THROW", Const, 0, ""}, + {"RTN_UNICAST", Const, 0, ""}, + {"RTN_UNREACHABLE", Const, 0, ""}, + {"RTN_UNSPEC", Const, 0, ""}, + {"RTN_XRESOLVE", Const, 0, ""}, + {"RTPROT_BIRD", Const, 0, ""}, + {"RTPROT_BOOT", Const, 0, ""}, + {"RTPROT_DHCP", Const, 0, ""}, + {"RTPROT_DNROUTED", Const, 0, ""}, + {"RTPROT_GATED", Const, 0, ""}, + {"RTPROT_KERNEL", Const, 0, ""}, + {"RTPROT_MRT", Const, 0, ""}, + {"RTPROT_NTK", Const, 0, ""}, + {"RTPROT_RA", Const, 0, ""}, + {"RTPROT_REDIRECT", Const, 0, ""}, + {"RTPROT_STATIC", Const, 0, ""}, + {"RTPROT_UNSPEC", Const, 0, ""}, + {"RTPROT_XORP", Const, 0, ""}, + {"RTPROT_ZEBRA", Const, 0, ""}, + {"RTV_EXPIRE", Const, 0, ""}, + {"RTV_HOPCOUNT", Const, 0, ""}, + {"RTV_MTU", Const, 0, ""}, + {"RTV_RPIPE", Const, 0, ""}, + {"RTV_RTT", Const, 0, ""}, + {"RTV_RTTVAR", Const, 0, ""}, + {"RTV_SPIPE", Const, 0, ""}, + {"RTV_SSTHRESH", Const, 0, ""}, + {"RTV_WEIGHT", Const, 0, ""}, + {"RT_CACHING_CONTEXT", Const, 1, ""}, + {"RT_CLASS_DEFAULT", Const, 0, ""}, + {"RT_CLASS_LOCAL", Const, 0, ""}, + {"RT_CLASS_MAIN", Const, 0, ""}, + {"RT_CLASS_MAX", Const, 0, ""}, + {"RT_CLASS_UNSPEC", Const, 0, ""}, + {"RT_DEFAULT_FIB", Const, 1, ""}, + {"RT_NORTREF", Const, 1, ""}, + {"RT_SCOPE_HOST", Const, 0, ""}, + {"RT_SCOPE_LINK", Const, 0, ""}, + {"RT_SCOPE_NOWHERE", Const, 0, ""}, + {"RT_SCOPE_SITE", Const, 0, ""}, + {"RT_SCOPE_UNIVERSE", Const, 0, ""}, + {"RT_TABLEID_MAX", Const, 1, ""}, + {"RT_TABLE_COMPAT", Const, 0, ""}, + {"RT_TABLE_DEFAULT", Const, 0, ""}, + {"RT_TABLE_LOCAL", Const, 0, ""}, + {"RT_TABLE_MAIN", Const, 0, ""}, + {"RT_TABLE_MAX", Const, 0, ""}, + {"RT_TABLE_UNSPEC", Const, 0, ""}, + {"RUSAGE_CHILDREN", Const, 0, ""}, + {"RUSAGE_SELF", Const, 0, ""}, + {"RUSAGE_THREAD", Const, 0, ""}, + {"Radvisory_t", Type, 0, ""}, + {"Radvisory_t.Count", Field, 0, ""}, + {"Radvisory_t.Offset", Field, 0, ""}, + {"Radvisory_t.Pad_cgo_0", Field, 0, ""}, + {"RawConn", Type, 9, ""}, + {"RawSockaddr", Type, 0, ""}, + {"RawSockaddr.Data", Field, 0, ""}, + {"RawSockaddr.Family", Field, 0, ""}, + {"RawSockaddr.Len", Field, 0, ""}, + {"RawSockaddrAny", Type, 0, ""}, + {"RawSockaddrAny.Addr", Field, 0, ""}, + {"RawSockaddrAny.Pad", Field, 0, ""}, + 
{"RawSockaddrDatalink", Type, 0, ""}, + {"RawSockaddrDatalink.Alen", Field, 0, ""}, + {"RawSockaddrDatalink.Data", Field, 0, ""}, + {"RawSockaddrDatalink.Family", Field, 0, ""}, + {"RawSockaddrDatalink.Index", Field, 0, ""}, + {"RawSockaddrDatalink.Len", Field, 0, ""}, + {"RawSockaddrDatalink.Nlen", Field, 0, ""}, + {"RawSockaddrDatalink.Pad_cgo_0", Field, 2, ""}, + {"RawSockaddrDatalink.Slen", Field, 0, ""}, + {"RawSockaddrDatalink.Type", Field, 0, ""}, + {"RawSockaddrInet4", Type, 0, ""}, + {"RawSockaddrInet4.Addr", Field, 0, ""}, + {"RawSockaddrInet4.Family", Field, 0, ""}, + {"RawSockaddrInet4.Len", Field, 0, ""}, + {"RawSockaddrInet4.Port", Field, 0, ""}, + {"RawSockaddrInet4.Zero", Field, 0, ""}, + {"RawSockaddrInet6", Type, 0, ""}, + {"RawSockaddrInet6.Addr", Field, 0, ""}, + {"RawSockaddrInet6.Family", Field, 0, ""}, + {"RawSockaddrInet6.Flowinfo", Field, 0, ""}, + {"RawSockaddrInet6.Len", Field, 0, ""}, + {"RawSockaddrInet6.Port", Field, 0, ""}, + {"RawSockaddrInet6.Scope_id", Field, 0, ""}, + {"RawSockaddrLinklayer", Type, 0, ""}, + {"RawSockaddrLinklayer.Addr", Field, 0, ""}, + {"RawSockaddrLinklayer.Family", Field, 0, ""}, + {"RawSockaddrLinklayer.Halen", Field, 0, ""}, + {"RawSockaddrLinklayer.Hatype", Field, 0, ""}, + {"RawSockaddrLinklayer.Ifindex", Field, 0, ""}, + {"RawSockaddrLinklayer.Pkttype", Field, 0, ""}, + {"RawSockaddrLinklayer.Protocol", Field, 0, ""}, + {"RawSockaddrNetlink", Type, 0, ""}, + {"RawSockaddrNetlink.Family", Field, 0, ""}, + {"RawSockaddrNetlink.Groups", Field, 0, ""}, + {"RawSockaddrNetlink.Pad", Field, 0, ""}, + {"RawSockaddrNetlink.Pid", Field, 0, ""}, + {"RawSockaddrUnix", Type, 0, ""}, + {"RawSockaddrUnix.Family", Field, 0, ""}, + {"RawSockaddrUnix.Len", Field, 0, ""}, + {"RawSockaddrUnix.Pad_cgo_0", Field, 2, ""}, + {"RawSockaddrUnix.Path", Field, 0, ""}, + {"RawSyscall", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"}, + {"RawSyscall6", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"}, + {"Read", Func, 0, "func(fd int, p []byte) (n int, err error)"}, + {"ReadConsole", Func, 1, ""}, + {"ReadDirectoryChanges", Func, 0, ""}, + {"ReadDirent", Func, 0, "func(fd int, buf []byte) (n int, err error)"}, + {"ReadFile", Func, 0, ""}, + {"Readlink", Func, 0, "func(path string, buf []byte) (n int, err error)"}, + {"Reboot", Func, 0, "func(cmd int) (err error)"}, + {"Recvfrom", Func, 0, "func(fd int, p []byte, flags int) (n int, from Sockaddr, err error)"}, + {"Recvmsg", Func, 0, "func(fd int, p []byte, oob []byte, flags int) (n int, oobn int, recvflags int, from Sockaddr, err error)"}, + {"RegCloseKey", Func, 0, ""}, + {"RegEnumKeyEx", Func, 0, ""}, + {"RegOpenKeyEx", Func, 0, ""}, + {"RegQueryInfoKey", Func, 0, ""}, + {"RegQueryValueEx", Func, 0, ""}, + {"RemoveDirectory", Func, 0, ""}, + {"Removexattr", Func, 1, "func(path string, attr string) (err error)"}, + {"Rename", Func, 0, "func(oldpath string, newpath string) (err error)"}, + {"Renameat", Func, 0, "func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)"}, + {"Revoke", Func, 0, ""}, + {"Rlimit", Type, 0, ""}, + {"Rlimit.Cur", Field, 0, ""}, + {"Rlimit.Max", Field, 0, ""}, + {"Rmdir", Func, 0, "func(path string) error"}, + {"RouteMessage", Type, 0, ""}, + {"RouteMessage.Data", Field, 0, ""}, + {"RouteMessage.Header", Field, 0, ""}, + {"RouteRIB", Func, 0, ""}, + {"RoutingMessage", Type, 0, ""}, + {"RtAttr", Type, 0, ""}, + 
{"RtAttr.Len", Field, 0, ""}, + {"RtAttr.Type", Field, 0, ""}, + {"RtGenmsg", Type, 0, ""}, + {"RtGenmsg.Family", Field, 0, ""}, + {"RtMetrics", Type, 0, ""}, + {"RtMetrics.Expire", Field, 0, ""}, + {"RtMetrics.Filler", Field, 0, ""}, + {"RtMetrics.Hopcount", Field, 0, ""}, + {"RtMetrics.Locks", Field, 0, ""}, + {"RtMetrics.Mtu", Field, 0, ""}, + {"RtMetrics.Pad", Field, 3, ""}, + {"RtMetrics.Pksent", Field, 0, ""}, + {"RtMetrics.Recvpipe", Field, 0, ""}, + {"RtMetrics.Refcnt", Field, 2, ""}, + {"RtMetrics.Rtt", Field, 0, ""}, + {"RtMetrics.Rttvar", Field, 0, ""}, + {"RtMetrics.Sendpipe", Field, 0, ""}, + {"RtMetrics.Ssthresh", Field, 0, ""}, + {"RtMetrics.Weight", Field, 0, ""}, + {"RtMsg", Type, 0, ""}, + {"RtMsg.Dst_len", Field, 0, ""}, + {"RtMsg.Family", Field, 0, ""}, + {"RtMsg.Flags", Field, 0, ""}, + {"RtMsg.Protocol", Field, 0, ""}, + {"RtMsg.Scope", Field, 0, ""}, + {"RtMsg.Src_len", Field, 0, ""}, + {"RtMsg.Table", Field, 0, ""}, + {"RtMsg.Tos", Field, 0, ""}, + {"RtMsg.Type", Field, 0, ""}, + {"RtMsghdr", Type, 0, ""}, + {"RtMsghdr.Addrs", Field, 0, ""}, + {"RtMsghdr.Errno", Field, 0, ""}, + {"RtMsghdr.Flags", Field, 0, ""}, + {"RtMsghdr.Fmask", Field, 0, ""}, + {"RtMsghdr.Hdrlen", Field, 2, ""}, + {"RtMsghdr.Index", Field, 0, ""}, + {"RtMsghdr.Inits", Field, 0, ""}, + {"RtMsghdr.Mpls", Field, 2, ""}, + {"RtMsghdr.Msglen", Field, 0, ""}, + {"RtMsghdr.Pad_cgo_0", Field, 0, ""}, + {"RtMsghdr.Pad_cgo_1", Field, 2, ""}, + {"RtMsghdr.Pid", Field, 0, ""}, + {"RtMsghdr.Priority", Field, 2, ""}, + {"RtMsghdr.Rmx", Field, 0, ""}, + {"RtMsghdr.Seq", Field, 0, ""}, + {"RtMsghdr.Tableid", Field, 2, ""}, + {"RtMsghdr.Type", Field, 0, ""}, + {"RtMsghdr.Use", Field, 0, ""}, + {"RtMsghdr.Version", Field, 0, ""}, + {"RtNexthop", Type, 0, ""}, + {"RtNexthop.Flags", Field, 0, ""}, + {"RtNexthop.Hops", Field, 0, ""}, + {"RtNexthop.Ifindex", Field, 0, ""}, + {"RtNexthop.Len", Field, 0, ""}, + {"Rusage", Type, 0, ""}, + {"Rusage.CreationTime", Field, 0, ""}, + {"Rusage.ExitTime", Field, 0, ""}, + {"Rusage.Idrss", Field, 0, ""}, + {"Rusage.Inblock", Field, 0, ""}, + {"Rusage.Isrss", Field, 0, ""}, + {"Rusage.Ixrss", Field, 0, ""}, + {"Rusage.KernelTime", Field, 0, ""}, + {"Rusage.Majflt", Field, 0, ""}, + {"Rusage.Maxrss", Field, 0, ""}, + {"Rusage.Minflt", Field, 0, ""}, + {"Rusage.Msgrcv", Field, 0, ""}, + {"Rusage.Msgsnd", Field, 0, ""}, + {"Rusage.Nivcsw", Field, 0, ""}, + {"Rusage.Nsignals", Field, 0, ""}, + {"Rusage.Nswap", Field, 0, ""}, + {"Rusage.Nvcsw", Field, 0, ""}, + {"Rusage.Oublock", Field, 0, ""}, + {"Rusage.Stime", Field, 0, ""}, + {"Rusage.UserTime", Field, 0, ""}, + {"Rusage.Utime", Field, 0, ""}, + {"SCM_BINTIME", Const, 0, ""}, + {"SCM_CREDENTIALS", Const, 0, ""}, + {"SCM_CREDS", Const, 0, ""}, + {"SCM_RIGHTS", Const, 0, ""}, + {"SCM_TIMESTAMP", Const, 0, ""}, + {"SCM_TIMESTAMPING", Const, 0, ""}, + {"SCM_TIMESTAMPNS", Const, 0, ""}, + {"SCM_TIMESTAMP_MONOTONIC", Const, 0, ""}, + {"SHUT_RD", Const, 0, ""}, + {"SHUT_RDWR", Const, 0, ""}, + {"SHUT_WR", Const, 0, ""}, + {"SID", Type, 0, ""}, + {"SIDAndAttributes", Type, 0, ""}, + {"SIDAndAttributes.Attributes", Field, 0, ""}, + {"SIDAndAttributes.Sid", Field, 0, ""}, + {"SIGABRT", Const, 0, ""}, + {"SIGALRM", Const, 0, ""}, + {"SIGBUS", Const, 0, ""}, + {"SIGCHLD", Const, 0, ""}, + {"SIGCLD", Const, 0, ""}, + {"SIGCONT", Const, 0, ""}, + {"SIGEMT", Const, 0, ""}, + {"SIGFPE", Const, 0, ""}, + {"SIGHUP", Const, 0, ""}, + {"SIGILL", Const, 0, ""}, + {"SIGINFO", Const, 0, ""}, + {"SIGINT", Const, 0, ""}, + {"SIGIO", Const, 0, ""}, + 
{"SIGIOT", Const, 0, ""}, + {"SIGKILL", Const, 0, ""}, + {"SIGLIBRT", Const, 1, ""}, + {"SIGLWP", Const, 0, ""}, + {"SIGPIPE", Const, 0, ""}, + {"SIGPOLL", Const, 0, ""}, + {"SIGPROF", Const, 0, ""}, + {"SIGPWR", Const, 0, ""}, + {"SIGQUIT", Const, 0, ""}, + {"SIGSEGV", Const, 0, ""}, + {"SIGSTKFLT", Const, 0, ""}, + {"SIGSTOP", Const, 0, ""}, + {"SIGSYS", Const, 0, ""}, + {"SIGTERM", Const, 0, ""}, + {"SIGTHR", Const, 0, ""}, + {"SIGTRAP", Const, 0, ""}, + {"SIGTSTP", Const, 0, ""}, + {"SIGTTIN", Const, 0, ""}, + {"SIGTTOU", Const, 0, ""}, + {"SIGUNUSED", Const, 0, ""}, + {"SIGURG", Const, 0, ""}, + {"SIGUSR1", Const, 0, ""}, + {"SIGUSR2", Const, 0, ""}, + {"SIGVTALRM", Const, 0, ""}, + {"SIGWINCH", Const, 0, ""}, + {"SIGXCPU", Const, 0, ""}, + {"SIGXFSZ", Const, 0, ""}, + {"SIOCADDDLCI", Const, 0, ""}, + {"SIOCADDMULTI", Const, 0, ""}, + {"SIOCADDRT", Const, 0, ""}, + {"SIOCAIFADDR", Const, 0, ""}, + {"SIOCAIFGROUP", Const, 0, ""}, + {"SIOCALIFADDR", Const, 0, ""}, + {"SIOCARPIPLL", Const, 0, ""}, + {"SIOCATMARK", Const, 0, ""}, + {"SIOCAUTOADDR", Const, 0, ""}, + {"SIOCAUTONETMASK", Const, 0, ""}, + {"SIOCBRDGADD", Const, 1, ""}, + {"SIOCBRDGADDS", Const, 1, ""}, + {"SIOCBRDGARL", Const, 1, ""}, + {"SIOCBRDGDADDR", Const, 1, ""}, + {"SIOCBRDGDEL", Const, 1, ""}, + {"SIOCBRDGDELS", Const, 1, ""}, + {"SIOCBRDGFLUSH", Const, 1, ""}, + {"SIOCBRDGFRL", Const, 1, ""}, + {"SIOCBRDGGCACHE", Const, 1, ""}, + {"SIOCBRDGGFD", Const, 1, ""}, + {"SIOCBRDGGHT", Const, 1, ""}, + {"SIOCBRDGGIFFLGS", Const, 1, ""}, + {"SIOCBRDGGMA", Const, 1, ""}, + {"SIOCBRDGGPARAM", Const, 1, ""}, + {"SIOCBRDGGPRI", Const, 1, ""}, + {"SIOCBRDGGRL", Const, 1, ""}, + {"SIOCBRDGGSIFS", Const, 1, ""}, + {"SIOCBRDGGTO", Const, 1, ""}, + {"SIOCBRDGIFS", Const, 1, ""}, + {"SIOCBRDGRTS", Const, 1, ""}, + {"SIOCBRDGSADDR", Const, 1, ""}, + {"SIOCBRDGSCACHE", Const, 1, ""}, + {"SIOCBRDGSFD", Const, 1, ""}, + {"SIOCBRDGSHT", Const, 1, ""}, + {"SIOCBRDGSIFCOST", Const, 1, ""}, + {"SIOCBRDGSIFFLGS", Const, 1, ""}, + {"SIOCBRDGSIFPRIO", Const, 1, ""}, + {"SIOCBRDGSMA", Const, 1, ""}, + {"SIOCBRDGSPRI", Const, 1, ""}, + {"SIOCBRDGSPROTO", Const, 1, ""}, + {"SIOCBRDGSTO", Const, 1, ""}, + {"SIOCBRDGSTXHC", Const, 1, ""}, + {"SIOCDARP", Const, 0, ""}, + {"SIOCDELDLCI", Const, 0, ""}, + {"SIOCDELMULTI", Const, 0, ""}, + {"SIOCDELRT", Const, 0, ""}, + {"SIOCDEVPRIVATE", Const, 0, ""}, + {"SIOCDIFADDR", Const, 0, ""}, + {"SIOCDIFGROUP", Const, 0, ""}, + {"SIOCDIFPHYADDR", Const, 0, ""}, + {"SIOCDLIFADDR", Const, 0, ""}, + {"SIOCDRARP", Const, 0, ""}, + {"SIOCGARP", Const, 0, ""}, + {"SIOCGDRVSPEC", Const, 0, ""}, + {"SIOCGETKALIVE", Const, 1, ""}, + {"SIOCGETLABEL", Const, 1, ""}, + {"SIOCGETPFLOW", Const, 1, ""}, + {"SIOCGETPFSYNC", Const, 1, ""}, + {"SIOCGETSGCNT", Const, 0, ""}, + {"SIOCGETVIFCNT", Const, 0, ""}, + {"SIOCGETVLAN", Const, 0, ""}, + {"SIOCGHIWAT", Const, 0, ""}, + {"SIOCGIFADDR", Const, 0, ""}, + {"SIOCGIFADDRPREF", Const, 1, ""}, + {"SIOCGIFALIAS", Const, 1, ""}, + {"SIOCGIFALTMTU", Const, 0, ""}, + {"SIOCGIFASYNCMAP", Const, 0, ""}, + {"SIOCGIFBOND", Const, 0, ""}, + {"SIOCGIFBR", Const, 0, ""}, + {"SIOCGIFBRDADDR", Const, 0, ""}, + {"SIOCGIFCAP", Const, 0, ""}, + {"SIOCGIFCONF", Const, 0, ""}, + {"SIOCGIFCOUNT", Const, 0, ""}, + {"SIOCGIFDATA", Const, 1, ""}, + {"SIOCGIFDESCR", Const, 0, ""}, + {"SIOCGIFDEVMTU", Const, 0, ""}, + {"SIOCGIFDLT", Const, 1, ""}, + {"SIOCGIFDSTADDR", Const, 0, ""}, + {"SIOCGIFENCAP", Const, 0, ""}, + {"SIOCGIFFIB", Const, 1, ""}, + {"SIOCGIFFLAGS", Const, 0, ""}, + {"SIOCGIFGATTR", Const, 
1, ""}, + {"SIOCGIFGENERIC", Const, 0, ""}, + {"SIOCGIFGMEMB", Const, 0, ""}, + {"SIOCGIFGROUP", Const, 0, ""}, + {"SIOCGIFHARDMTU", Const, 3, ""}, + {"SIOCGIFHWADDR", Const, 0, ""}, + {"SIOCGIFINDEX", Const, 0, ""}, + {"SIOCGIFKPI", Const, 0, ""}, + {"SIOCGIFMAC", Const, 0, ""}, + {"SIOCGIFMAP", Const, 0, ""}, + {"SIOCGIFMEDIA", Const, 0, ""}, + {"SIOCGIFMEM", Const, 0, ""}, + {"SIOCGIFMETRIC", Const, 0, ""}, + {"SIOCGIFMTU", Const, 0, ""}, + {"SIOCGIFNAME", Const, 0, ""}, + {"SIOCGIFNETMASK", Const, 0, ""}, + {"SIOCGIFPDSTADDR", Const, 0, ""}, + {"SIOCGIFPFLAGS", Const, 0, ""}, + {"SIOCGIFPHYS", Const, 0, ""}, + {"SIOCGIFPRIORITY", Const, 1, ""}, + {"SIOCGIFPSRCADDR", Const, 0, ""}, + {"SIOCGIFRDOMAIN", Const, 1, ""}, + {"SIOCGIFRTLABEL", Const, 1, ""}, + {"SIOCGIFSLAVE", Const, 0, ""}, + {"SIOCGIFSTATUS", Const, 0, ""}, + {"SIOCGIFTIMESLOT", Const, 1, ""}, + {"SIOCGIFTXQLEN", Const, 0, ""}, + {"SIOCGIFVLAN", Const, 0, ""}, + {"SIOCGIFWAKEFLAGS", Const, 0, ""}, + {"SIOCGIFXFLAGS", Const, 1, ""}, + {"SIOCGLIFADDR", Const, 0, ""}, + {"SIOCGLIFPHYADDR", Const, 0, ""}, + {"SIOCGLIFPHYRTABLE", Const, 1, ""}, + {"SIOCGLIFPHYTTL", Const, 3, ""}, + {"SIOCGLINKSTR", Const, 1, ""}, + {"SIOCGLOWAT", Const, 0, ""}, + {"SIOCGPGRP", Const, 0, ""}, + {"SIOCGPRIVATE_0", Const, 0, ""}, + {"SIOCGPRIVATE_1", Const, 0, ""}, + {"SIOCGRARP", Const, 0, ""}, + {"SIOCGSPPPPARAMS", Const, 3, ""}, + {"SIOCGSTAMP", Const, 0, ""}, + {"SIOCGSTAMPNS", Const, 0, ""}, + {"SIOCGVH", Const, 1, ""}, + {"SIOCGVNETID", Const, 3, ""}, + {"SIOCIFCREATE", Const, 0, ""}, + {"SIOCIFCREATE2", Const, 0, ""}, + {"SIOCIFDESTROY", Const, 0, ""}, + {"SIOCIFGCLONERS", Const, 0, ""}, + {"SIOCINITIFADDR", Const, 1, ""}, + {"SIOCPROTOPRIVATE", Const, 0, ""}, + {"SIOCRSLVMULTI", Const, 0, ""}, + {"SIOCRTMSG", Const, 0, ""}, + {"SIOCSARP", Const, 0, ""}, + {"SIOCSDRVSPEC", Const, 0, ""}, + {"SIOCSETKALIVE", Const, 1, ""}, + {"SIOCSETLABEL", Const, 1, ""}, + {"SIOCSETPFLOW", Const, 1, ""}, + {"SIOCSETPFSYNC", Const, 1, ""}, + {"SIOCSETVLAN", Const, 0, ""}, + {"SIOCSHIWAT", Const, 0, ""}, + {"SIOCSIFADDR", Const, 0, ""}, + {"SIOCSIFADDRPREF", Const, 1, ""}, + {"SIOCSIFALTMTU", Const, 0, ""}, + {"SIOCSIFASYNCMAP", Const, 0, ""}, + {"SIOCSIFBOND", Const, 0, ""}, + {"SIOCSIFBR", Const, 0, ""}, + {"SIOCSIFBRDADDR", Const, 0, ""}, + {"SIOCSIFCAP", Const, 0, ""}, + {"SIOCSIFDESCR", Const, 0, ""}, + {"SIOCSIFDSTADDR", Const, 0, ""}, + {"SIOCSIFENCAP", Const, 0, ""}, + {"SIOCSIFFIB", Const, 1, ""}, + {"SIOCSIFFLAGS", Const, 0, ""}, + {"SIOCSIFGATTR", Const, 1, ""}, + {"SIOCSIFGENERIC", Const, 0, ""}, + {"SIOCSIFHWADDR", Const, 0, ""}, + {"SIOCSIFHWBROADCAST", Const, 0, ""}, + {"SIOCSIFKPI", Const, 0, ""}, + {"SIOCSIFLINK", Const, 0, ""}, + {"SIOCSIFLLADDR", Const, 0, ""}, + {"SIOCSIFMAC", Const, 0, ""}, + {"SIOCSIFMAP", Const, 0, ""}, + {"SIOCSIFMEDIA", Const, 0, ""}, + {"SIOCSIFMEM", Const, 0, ""}, + {"SIOCSIFMETRIC", Const, 0, ""}, + {"SIOCSIFMTU", Const, 0, ""}, + {"SIOCSIFNAME", Const, 0, ""}, + {"SIOCSIFNETMASK", Const, 0, ""}, + {"SIOCSIFPFLAGS", Const, 0, ""}, + {"SIOCSIFPHYADDR", Const, 0, ""}, + {"SIOCSIFPHYS", Const, 0, ""}, + {"SIOCSIFPRIORITY", Const, 1, ""}, + {"SIOCSIFRDOMAIN", Const, 1, ""}, + {"SIOCSIFRTLABEL", Const, 1, ""}, + {"SIOCSIFRVNET", Const, 0, ""}, + {"SIOCSIFSLAVE", Const, 0, ""}, + {"SIOCSIFTIMESLOT", Const, 1, ""}, + {"SIOCSIFTXQLEN", Const, 0, ""}, + {"SIOCSIFVLAN", Const, 0, ""}, + {"SIOCSIFVNET", Const, 0, ""}, + {"SIOCSIFXFLAGS", Const, 1, ""}, + {"SIOCSLIFPHYADDR", Const, 0, ""}, + {"SIOCSLIFPHYRTABLE", Const, 1, 
""}, + {"SIOCSLIFPHYTTL", Const, 3, ""}, + {"SIOCSLINKSTR", Const, 1, ""}, + {"SIOCSLOWAT", Const, 0, ""}, + {"SIOCSPGRP", Const, 0, ""}, + {"SIOCSRARP", Const, 0, ""}, + {"SIOCSSPPPPARAMS", Const, 3, ""}, + {"SIOCSVH", Const, 1, ""}, + {"SIOCSVNETID", Const, 3, ""}, + {"SIOCZIFDATA", Const, 1, ""}, + {"SIO_GET_EXTENSION_FUNCTION_POINTER", Const, 1, ""}, + {"SIO_GET_INTERFACE_LIST", Const, 0, ""}, + {"SIO_KEEPALIVE_VALS", Const, 3, ""}, + {"SIO_UDP_CONNRESET", Const, 4, ""}, + {"SOCK_CLOEXEC", Const, 0, ""}, + {"SOCK_DCCP", Const, 0, ""}, + {"SOCK_DGRAM", Const, 0, ""}, + {"SOCK_FLAGS_MASK", Const, 1, ""}, + {"SOCK_MAXADDRLEN", Const, 0, ""}, + {"SOCK_NONBLOCK", Const, 0, ""}, + {"SOCK_NOSIGPIPE", Const, 1, ""}, + {"SOCK_PACKET", Const, 0, ""}, + {"SOCK_RAW", Const, 0, ""}, + {"SOCK_RDM", Const, 0, ""}, + {"SOCK_SEQPACKET", Const, 0, ""}, + {"SOCK_STREAM", Const, 0, ""}, + {"SOL_AAL", Const, 0, ""}, + {"SOL_ATM", Const, 0, ""}, + {"SOL_DECNET", Const, 0, ""}, + {"SOL_ICMPV6", Const, 0, ""}, + {"SOL_IP", Const, 0, ""}, + {"SOL_IPV6", Const, 0, ""}, + {"SOL_IRDA", Const, 0, ""}, + {"SOL_PACKET", Const, 0, ""}, + {"SOL_RAW", Const, 0, ""}, + {"SOL_SOCKET", Const, 0, ""}, + {"SOL_TCP", Const, 0, ""}, + {"SOL_X25", Const, 0, ""}, + {"SOMAXCONN", Const, 0, ""}, + {"SO_ACCEPTCONN", Const, 0, ""}, + {"SO_ACCEPTFILTER", Const, 0, ""}, + {"SO_ATTACH_FILTER", Const, 0, ""}, + {"SO_BINDANY", Const, 1, ""}, + {"SO_BINDTODEVICE", Const, 0, ""}, + {"SO_BINTIME", Const, 0, ""}, + {"SO_BROADCAST", Const, 0, ""}, + {"SO_BSDCOMPAT", Const, 0, ""}, + {"SO_DEBUG", Const, 0, ""}, + {"SO_DETACH_FILTER", Const, 0, ""}, + {"SO_DOMAIN", Const, 0, ""}, + {"SO_DONTROUTE", Const, 0, ""}, + {"SO_DONTTRUNC", Const, 0, ""}, + {"SO_ERROR", Const, 0, ""}, + {"SO_KEEPALIVE", Const, 0, ""}, + {"SO_LABEL", Const, 0, ""}, + {"SO_LINGER", Const, 0, ""}, + {"SO_LINGER_SEC", Const, 0, ""}, + {"SO_LISTENINCQLEN", Const, 0, ""}, + {"SO_LISTENQLEN", Const, 0, ""}, + {"SO_LISTENQLIMIT", Const, 0, ""}, + {"SO_MARK", Const, 0, ""}, + {"SO_NETPROC", Const, 1, ""}, + {"SO_NKE", Const, 0, ""}, + {"SO_NOADDRERR", Const, 0, ""}, + {"SO_NOHEADER", Const, 1, ""}, + {"SO_NOSIGPIPE", Const, 0, ""}, + {"SO_NOTIFYCONFLICT", Const, 0, ""}, + {"SO_NO_CHECK", Const, 0, ""}, + {"SO_NO_DDP", Const, 0, ""}, + {"SO_NO_OFFLOAD", Const, 0, ""}, + {"SO_NP_EXTENSIONS", Const, 0, ""}, + {"SO_NREAD", Const, 0, ""}, + {"SO_NUMRCVPKT", Const, 16, ""}, + {"SO_NWRITE", Const, 0, ""}, + {"SO_OOBINLINE", Const, 0, ""}, + {"SO_OVERFLOWED", Const, 1, ""}, + {"SO_PASSCRED", Const, 0, ""}, + {"SO_PASSSEC", Const, 0, ""}, + {"SO_PEERCRED", Const, 0, ""}, + {"SO_PEERLABEL", Const, 0, ""}, + {"SO_PEERNAME", Const, 0, ""}, + {"SO_PEERSEC", Const, 0, ""}, + {"SO_PRIORITY", Const, 0, ""}, + {"SO_PROTOCOL", Const, 0, ""}, + {"SO_PROTOTYPE", Const, 1, ""}, + {"SO_RANDOMPORT", Const, 0, ""}, + {"SO_RCVBUF", Const, 0, ""}, + {"SO_RCVBUFFORCE", Const, 0, ""}, + {"SO_RCVLOWAT", Const, 0, ""}, + {"SO_RCVTIMEO", Const, 0, ""}, + {"SO_RESTRICTIONS", Const, 0, ""}, + {"SO_RESTRICT_DENYIN", Const, 0, ""}, + {"SO_RESTRICT_DENYOUT", Const, 0, ""}, + {"SO_RESTRICT_DENYSET", Const, 0, ""}, + {"SO_REUSEADDR", Const, 0, ""}, + {"SO_REUSEPORT", Const, 0, ""}, + {"SO_REUSESHAREUID", Const, 0, ""}, + {"SO_RTABLE", Const, 1, ""}, + {"SO_RXQ_OVFL", Const, 0, ""}, + {"SO_SECURITY_AUTHENTICATION", Const, 0, ""}, + {"SO_SECURITY_ENCRYPTION_NETWORK", Const, 0, ""}, + {"SO_SECURITY_ENCRYPTION_TRANSPORT", Const, 0, ""}, + {"SO_SETFIB", Const, 0, ""}, + {"SO_SNDBUF", Const, 0, ""}, + {"SO_SNDBUFFORCE", 
Const, 0, ""}, + {"SO_SNDLOWAT", Const, 0, ""}, + {"SO_SNDTIMEO", Const, 0, ""}, + {"SO_SPLICE", Const, 1, ""}, + {"SO_TIMESTAMP", Const, 0, ""}, + {"SO_TIMESTAMPING", Const, 0, ""}, + {"SO_TIMESTAMPNS", Const, 0, ""}, + {"SO_TIMESTAMP_MONOTONIC", Const, 0, ""}, + {"SO_TYPE", Const, 0, ""}, + {"SO_UPCALLCLOSEWAIT", Const, 0, ""}, + {"SO_UPDATE_ACCEPT_CONTEXT", Const, 0, ""}, + {"SO_UPDATE_CONNECT_CONTEXT", Const, 1, ""}, + {"SO_USELOOPBACK", Const, 0, ""}, + {"SO_USER_COOKIE", Const, 1, ""}, + {"SO_VENDOR", Const, 3, ""}, + {"SO_WANTMORE", Const, 0, ""}, + {"SO_WANTOOBFLAG", Const, 0, ""}, + {"SSLExtraCertChainPolicyPara", Type, 0, ""}, + {"SSLExtraCertChainPolicyPara.AuthType", Field, 0, ""}, + {"SSLExtraCertChainPolicyPara.Checks", Field, 0, ""}, + {"SSLExtraCertChainPolicyPara.ServerName", Field, 0, ""}, + {"SSLExtraCertChainPolicyPara.Size", Field, 0, ""}, + {"STANDARD_RIGHTS_ALL", Const, 0, ""}, + {"STANDARD_RIGHTS_EXECUTE", Const, 0, ""}, + {"STANDARD_RIGHTS_READ", Const, 0, ""}, + {"STANDARD_RIGHTS_REQUIRED", Const, 0, ""}, + {"STANDARD_RIGHTS_WRITE", Const, 0, ""}, + {"STARTF_USESHOWWINDOW", Const, 0, ""}, + {"STARTF_USESTDHANDLES", Const, 0, ""}, + {"STD_ERROR_HANDLE", Const, 0, ""}, + {"STD_INPUT_HANDLE", Const, 0, ""}, + {"STD_OUTPUT_HANDLE", Const, 0, ""}, + {"SUBLANG_ENGLISH_US", Const, 0, ""}, + {"SW_FORCEMINIMIZE", Const, 0, ""}, + {"SW_HIDE", Const, 0, ""}, + {"SW_MAXIMIZE", Const, 0, ""}, + {"SW_MINIMIZE", Const, 0, ""}, + {"SW_NORMAL", Const, 0, ""}, + {"SW_RESTORE", Const, 0, ""}, + {"SW_SHOW", Const, 0, ""}, + {"SW_SHOWDEFAULT", Const, 0, ""}, + {"SW_SHOWMAXIMIZED", Const, 0, ""}, + {"SW_SHOWMINIMIZED", Const, 0, ""}, + {"SW_SHOWMINNOACTIVE", Const, 0, ""}, + {"SW_SHOWNA", Const, 0, ""}, + {"SW_SHOWNOACTIVATE", Const, 0, ""}, + {"SW_SHOWNORMAL", Const, 0, ""}, + {"SYMBOLIC_LINK_FLAG_DIRECTORY", Const, 4, ""}, + {"SYNCHRONIZE", Const, 0, ""}, + {"SYSCTL_VERSION", Const, 1, ""}, + {"SYSCTL_VERS_0", Const, 1, ""}, + {"SYSCTL_VERS_1", Const, 1, ""}, + {"SYSCTL_VERS_MASK", Const, 1, ""}, + {"SYS_ABORT2", Const, 0, ""}, + {"SYS_ACCEPT", Const, 0, ""}, + {"SYS_ACCEPT4", Const, 0, ""}, + {"SYS_ACCEPT_NOCANCEL", Const, 0, ""}, + {"SYS_ACCESS", Const, 0, ""}, + {"SYS_ACCESS_EXTENDED", Const, 0, ""}, + {"SYS_ACCT", Const, 0, ""}, + {"SYS_ADD_KEY", Const, 0, ""}, + {"SYS_ADD_PROFIL", Const, 0, ""}, + {"SYS_ADJFREQ", Const, 1, ""}, + {"SYS_ADJTIME", Const, 0, ""}, + {"SYS_ADJTIMEX", Const, 0, ""}, + {"SYS_AFS_SYSCALL", Const, 0, ""}, + {"SYS_AIO_CANCEL", Const, 0, ""}, + {"SYS_AIO_ERROR", Const, 0, ""}, + {"SYS_AIO_FSYNC", Const, 0, ""}, + {"SYS_AIO_MLOCK", Const, 14, ""}, + {"SYS_AIO_READ", Const, 0, ""}, + {"SYS_AIO_RETURN", Const, 0, ""}, + {"SYS_AIO_SUSPEND", Const, 0, ""}, + {"SYS_AIO_SUSPEND_NOCANCEL", Const, 0, ""}, + {"SYS_AIO_WAITCOMPLETE", Const, 14, ""}, + {"SYS_AIO_WRITE", Const, 0, ""}, + {"SYS_ALARM", Const, 0, ""}, + {"SYS_ARCH_PRCTL", Const, 0, ""}, + {"SYS_ARM_FADVISE64_64", Const, 0, ""}, + {"SYS_ARM_SYNC_FILE_RANGE", Const, 0, ""}, + {"SYS_ATGETMSG", Const, 0, ""}, + {"SYS_ATPGETREQ", Const, 0, ""}, + {"SYS_ATPGETRSP", Const, 0, ""}, + {"SYS_ATPSNDREQ", Const, 0, ""}, + {"SYS_ATPSNDRSP", Const, 0, ""}, + {"SYS_ATPUTMSG", Const, 0, ""}, + {"SYS_ATSOCKET", Const, 0, ""}, + {"SYS_AUDIT", Const, 0, ""}, + {"SYS_AUDITCTL", Const, 0, ""}, + {"SYS_AUDITON", Const, 0, ""}, + {"SYS_AUDIT_SESSION_JOIN", Const, 0, ""}, + {"SYS_AUDIT_SESSION_PORT", Const, 0, ""}, + {"SYS_AUDIT_SESSION_SELF", Const, 0, ""}, + {"SYS_BDFLUSH", Const, 0, ""}, + {"SYS_BIND", Const, 0, ""}, + 
{"SYS_BINDAT", Const, 3, ""}, + {"SYS_BREAK", Const, 0, ""}, + {"SYS_BRK", Const, 0, ""}, + {"SYS_BSDTHREAD_CREATE", Const, 0, ""}, + {"SYS_BSDTHREAD_REGISTER", Const, 0, ""}, + {"SYS_BSDTHREAD_TERMINATE", Const, 0, ""}, + {"SYS_CAPGET", Const, 0, ""}, + {"SYS_CAPSET", Const, 0, ""}, + {"SYS_CAP_ENTER", Const, 0, ""}, + {"SYS_CAP_FCNTLS_GET", Const, 1, ""}, + {"SYS_CAP_FCNTLS_LIMIT", Const, 1, ""}, + {"SYS_CAP_GETMODE", Const, 0, ""}, + {"SYS_CAP_GETRIGHTS", Const, 0, ""}, + {"SYS_CAP_IOCTLS_GET", Const, 1, ""}, + {"SYS_CAP_IOCTLS_LIMIT", Const, 1, ""}, + {"SYS_CAP_NEW", Const, 0, ""}, + {"SYS_CAP_RIGHTS_GET", Const, 1, ""}, + {"SYS_CAP_RIGHTS_LIMIT", Const, 1, ""}, + {"SYS_CHDIR", Const, 0, ""}, + {"SYS_CHFLAGS", Const, 0, ""}, + {"SYS_CHFLAGSAT", Const, 3, ""}, + {"SYS_CHMOD", Const, 0, ""}, + {"SYS_CHMOD_EXTENDED", Const, 0, ""}, + {"SYS_CHOWN", Const, 0, ""}, + {"SYS_CHOWN32", Const, 0, ""}, + {"SYS_CHROOT", Const, 0, ""}, + {"SYS_CHUD", Const, 0, ""}, + {"SYS_CLOCK_ADJTIME", Const, 0, ""}, + {"SYS_CLOCK_GETCPUCLOCKID2", Const, 1, ""}, + {"SYS_CLOCK_GETRES", Const, 0, ""}, + {"SYS_CLOCK_GETTIME", Const, 0, ""}, + {"SYS_CLOCK_NANOSLEEP", Const, 0, ""}, + {"SYS_CLOCK_SETTIME", Const, 0, ""}, + {"SYS_CLONE", Const, 0, ""}, + {"SYS_CLOSE", Const, 0, ""}, + {"SYS_CLOSEFROM", Const, 0, ""}, + {"SYS_CLOSE_NOCANCEL", Const, 0, ""}, + {"SYS_CONNECT", Const, 0, ""}, + {"SYS_CONNECTAT", Const, 3, ""}, + {"SYS_CONNECT_NOCANCEL", Const, 0, ""}, + {"SYS_COPYFILE", Const, 0, ""}, + {"SYS_CPUSET", Const, 0, ""}, + {"SYS_CPUSET_GETAFFINITY", Const, 0, ""}, + {"SYS_CPUSET_GETID", Const, 0, ""}, + {"SYS_CPUSET_SETAFFINITY", Const, 0, ""}, + {"SYS_CPUSET_SETID", Const, 0, ""}, + {"SYS_CREAT", Const, 0, ""}, + {"SYS_CREATE_MODULE", Const, 0, ""}, + {"SYS_CSOPS", Const, 0, ""}, + {"SYS_CSOPS_AUDITTOKEN", Const, 16, ""}, + {"SYS_DELETE", Const, 0, ""}, + {"SYS_DELETE_MODULE", Const, 0, ""}, + {"SYS_DUP", Const, 0, ""}, + {"SYS_DUP2", Const, 0, ""}, + {"SYS_DUP3", Const, 0, ""}, + {"SYS_EACCESS", Const, 0, ""}, + {"SYS_EPOLL_CREATE", Const, 0, ""}, + {"SYS_EPOLL_CREATE1", Const, 0, ""}, + {"SYS_EPOLL_CTL", Const, 0, ""}, + {"SYS_EPOLL_CTL_OLD", Const, 0, ""}, + {"SYS_EPOLL_PWAIT", Const, 0, ""}, + {"SYS_EPOLL_WAIT", Const, 0, ""}, + {"SYS_EPOLL_WAIT_OLD", Const, 0, ""}, + {"SYS_EVENTFD", Const, 0, ""}, + {"SYS_EVENTFD2", Const, 0, ""}, + {"SYS_EXCHANGEDATA", Const, 0, ""}, + {"SYS_EXECVE", Const, 0, ""}, + {"SYS_EXIT", Const, 0, ""}, + {"SYS_EXIT_GROUP", Const, 0, ""}, + {"SYS_EXTATTRCTL", Const, 0, ""}, + {"SYS_EXTATTR_DELETE_FD", Const, 0, ""}, + {"SYS_EXTATTR_DELETE_FILE", Const, 0, ""}, + {"SYS_EXTATTR_DELETE_LINK", Const, 0, ""}, + {"SYS_EXTATTR_GET_FD", Const, 0, ""}, + {"SYS_EXTATTR_GET_FILE", Const, 0, ""}, + {"SYS_EXTATTR_GET_LINK", Const, 0, ""}, + {"SYS_EXTATTR_LIST_FD", Const, 0, ""}, + {"SYS_EXTATTR_LIST_FILE", Const, 0, ""}, + {"SYS_EXTATTR_LIST_LINK", Const, 0, ""}, + {"SYS_EXTATTR_SET_FD", Const, 0, ""}, + {"SYS_EXTATTR_SET_FILE", Const, 0, ""}, + {"SYS_EXTATTR_SET_LINK", Const, 0, ""}, + {"SYS_FACCESSAT", Const, 0, ""}, + {"SYS_FADVISE64", Const, 0, ""}, + {"SYS_FADVISE64_64", Const, 0, ""}, + {"SYS_FALLOCATE", Const, 0, ""}, + {"SYS_FANOTIFY_INIT", Const, 0, ""}, + {"SYS_FANOTIFY_MARK", Const, 0, ""}, + {"SYS_FCHDIR", Const, 0, ""}, + {"SYS_FCHFLAGS", Const, 0, ""}, + {"SYS_FCHMOD", Const, 0, ""}, + {"SYS_FCHMODAT", Const, 0, ""}, + {"SYS_FCHMOD_EXTENDED", Const, 0, ""}, + {"SYS_FCHOWN", Const, 0, ""}, + {"SYS_FCHOWN32", Const, 0, ""}, + {"SYS_FCHOWNAT", Const, 0, ""}, + {"SYS_FCHROOT", 
Const, 1, ""}, + {"SYS_FCNTL", Const, 0, ""}, + {"SYS_FCNTL64", Const, 0, ""}, + {"SYS_FCNTL_NOCANCEL", Const, 0, ""}, + {"SYS_FDATASYNC", Const, 0, ""}, + {"SYS_FEXECVE", Const, 0, ""}, + {"SYS_FFCLOCK_GETCOUNTER", Const, 0, ""}, + {"SYS_FFCLOCK_GETESTIMATE", Const, 0, ""}, + {"SYS_FFCLOCK_SETESTIMATE", Const, 0, ""}, + {"SYS_FFSCTL", Const, 0, ""}, + {"SYS_FGETATTRLIST", Const, 0, ""}, + {"SYS_FGETXATTR", Const, 0, ""}, + {"SYS_FHOPEN", Const, 0, ""}, + {"SYS_FHSTAT", Const, 0, ""}, + {"SYS_FHSTATFS", Const, 0, ""}, + {"SYS_FILEPORT_MAKEFD", Const, 0, ""}, + {"SYS_FILEPORT_MAKEPORT", Const, 0, ""}, + {"SYS_FKTRACE", Const, 1, ""}, + {"SYS_FLISTXATTR", Const, 0, ""}, + {"SYS_FLOCK", Const, 0, ""}, + {"SYS_FORK", Const, 0, ""}, + {"SYS_FPATHCONF", Const, 0, ""}, + {"SYS_FREEBSD6_FTRUNCATE", Const, 0, ""}, + {"SYS_FREEBSD6_LSEEK", Const, 0, ""}, + {"SYS_FREEBSD6_MMAP", Const, 0, ""}, + {"SYS_FREEBSD6_PREAD", Const, 0, ""}, + {"SYS_FREEBSD6_PWRITE", Const, 0, ""}, + {"SYS_FREEBSD6_TRUNCATE", Const, 0, ""}, + {"SYS_FREMOVEXATTR", Const, 0, ""}, + {"SYS_FSCTL", Const, 0, ""}, + {"SYS_FSETATTRLIST", Const, 0, ""}, + {"SYS_FSETXATTR", Const, 0, ""}, + {"SYS_FSGETPATH", Const, 0, ""}, + {"SYS_FSTAT", Const, 0, ""}, + {"SYS_FSTAT64", Const, 0, ""}, + {"SYS_FSTAT64_EXTENDED", Const, 0, ""}, + {"SYS_FSTATAT", Const, 0, ""}, + {"SYS_FSTATAT64", Const, 0, ""}, + {"SYS_FSTATFS", Const, 0, ""}, + {"SYS_FSTATFS64", Const, 0, ""}, + {"SYS_FSTATV", Const, 0, ""}, + {"SYS_FSTATVFS1", Const, 1, ""}, + {"SYS_FSTAT_EXTENDED", Const, 0, ""}, + {"SYS_FSYNC", Const, 0, ""}, + {"SYS_FSYNC_NOCANCEL", Const, 0, ""}, + {"SYS_FSYNC_RANGE", Const, 1, ""}, + {"SYS_FTIME", Const, 0, ""}, + {"SYS_FTRUNCATE", Const, 0, ""}, + {"SYS_FTRUNCATE64", Const, 0, ""}, + {"SYS_FUTEX", Const, 0, ""}, + {"SYS_FUTIMENS", Const, 1, ""}, + {"SYS_FUTIMES", Const, 0, ""}, + {"SYS_FUTIMESAT", Const, 0, ""}, + {"SYS_GETATTRLIST", Const, 0, ""}, + {"SYS_GETAUDIT", Const, 0, ""}, + {"SYS_GETAUDIT_ADDR", Const, 0, ""}, + {"SYS_GETAUID", Const, 0, ""}, + {"SYS_GETCONTEXT", Const, 0, ""}, + {"SYS_GETCPU", Const, 0, ""}, + {"SYS_GETCWD", Const, 0, ""}, + {"SYS_GETDENTS", Const, 0, ""}, + {"SYS_GETDENTS64", Const, 0, ""}, + {"SYS_GETDIRENTRIES", Const, 0, ""}, + {"SYS_GETDIRENTRIES64", Const, 0, ""}, + {"SYS_GETDIRENTRIESATTR", Const, 0, ""}, + {"SYS_GETDTABLECOUNT", Const, 1, ""}, + {"SYS_GETDTABLESIZE", Const, 0, ""}, + {"SYS_GETEGID", Const, 0, ""}, + {"SYS_GETEGID32", Const, 0, ""}, + {"SYS_GETEUID", Const, 0, ""}, + {"SYS_GETEUID32", Const, 0, ""}, + {"SYS_GETFH", Const, 0, ""}, + {"SYS_GETFSSTAT", Const, 0, ""}, + {"SYS_GETFSSTAT64", Const, 0, ""}, + {"SYS_GETGID", Const, 0, ""}, + {"SYS_GETGID32", Const, 0, ""}, + {"SYS_GETGROUPS", Const, 0, ""}, + {"SYS_GETGROUPS32", Const, 0, ""}, + {"SYS_GETHOSTUUID", Const, 0, ""}, + {"SYS_GETITIMER", Const, 0, ""}, + {"SYS_GETLCID", Const, 0, ""}, + {"SYS_GETLOGIN", Const, 0, ""}, + {"SYS_GETLOGINCLASS", Const, 0, ""}, + {"SYS_GETPEERNAME", Const, 0, ""}, + {"SYS_GETPGID", Const, 0, ""}, + {"SYS_GETPGRP", Const, 0, ""}, + {"SYS_GETPID", Const, 0, ""}, + {"SYS_GETPMSG", Const, 0, ""}, + {"SYS_GETPPID", Const, 0, ""}, + {"SYS_GETPRIORITY", Const, 0, ""}, + {"SYS_GETRESGID", Const, 0, ""}, + {"SYS_GETRESGID32", Const, 0, ""}, + {"SYS_GETRESUID", Const, 0, ""}, + {"SYS_GETRESUID32", Const, 0, ""}, + {"SYS_GETRLIMIT", Const, 0, ""}, + {"SYS_GETRTABLE", Const, 1, ""}, + {"SYS_GETRUSAGE", Const, 0, ""}, + {"SYS_GETSGROUPS", Const, 0, ""}, + {"SYS_GETSID", Const, 0, ""}, + {"SYS_GETSOCKNAME", Const, 0, ""}, + 
{"SYS_GETSOCKOPT", Const, 0, ""}, + {"SYS_GETTHRID", Const, 1, ""}, + {"SYS_GETTID", Const, 0, ""}, + {"SYS_GETTIMEOFDAY", Const, 0, ""}, + {"SYS_GETUID", Const, 0, ""}, + {"SYS_GETUID32", Const, 0, ""}, + {"SYS_GETVFSSTAT", Const, 1, ""}, + {"SYS_GETWGROUPS", Const, 0, ""}, + {"SYS_GETXATTR", Const, 0, ""}, + {"SYS_GET_KERNEL_SYMS", Const, 0, ""}, + {"SYS_GET_MEMPOLICY", Const, 0, ""}, + {"SYS_GET_ROBUST_LIST", Const, 0, ""}, + {"SYS_GET_THREAD_AREA", Const, 0, ""}, + {"SYS_GSSD_SYSCALL", Const, 14, ""}, + {"SYS_GTTY", Const, 0, ""}, + {"SYS_IDENTITYSVC", Const, 0, ""}, + {"SYS_IDLE", Const, 0, ""}, + {"SYS_INITGROUPS", Const, 0, ""}, + {"SYS_INIT_MODULE", Const, 0, ""}, + {"SYS_INOTIFY_ADD_WATCH", Const, 0, ""}, + {"SYS_INOTIFY_INIT", Const, 0, ""}, + {"SYS_INOTIFY_INIT1", Const, 0, ""}, + {"SYS_INOTIFY_RM_WATCH", Const, 0, ""}, + {"SYS_IOCTL", Const, 0, ""}, + {"SYS_IOPERM", Const, 0, ""}, + {"SYS_IOPL", Const, 0, ""}, + {"SYS_IOPOLICYSYS", Const, 0, ""}, + {"SYS_IOPRIO_GET", Const, 0, ""}, + {"SYS_IOPRIO_SET", Const, 0, ""}, + {"SYS_IO_CANCEL", Const, 0, ""}, + {"SYS_IO_DESTROY", Const, 0, ""}, + {"SYS_IO_GETEVENTS", Const, 0, ""}, + {"SYS_IO_SETUP", Const, 0, ""}, + {"SYS_IO_SUBMIT", Const, 0, ""}, + {"SYS_IPC", Const, 0, ""}, + {"SYS_ISSETUGID", Const, 0, ""}, + {"SYS_JAIL", Const, 0, ""}, + {"SYS_JAIL_ATTACH", Const, 0, ""}, + {"SYS_JAIL_GET", Const, 0, ""}, + {"SYS_JAIL_REMOVE", Const, 0, ""}, + {"SYS_JAIL_SET", Const, 0, ""}, + {"SYS_KAS_INFO", Const, 16, ""}, + {"SYS_KDEBUG_TRACE", Const, 0, ""}, + {"SYS_KENV", Const, 0, ""}, + {"SYS_KEVENT", Const, 0, ""}, + {"SYS_KEVENT64", Const, 0, ""}, + {"SYS_KEXEC_LOAD", Const, 0, ""}, + {"SYS_KEYCTL", Const, 0, ""}, + {"SYS_KILL", Const, 0, ""}, + {"SYS_KLDFIND", Const, 0, ""}, + {"SYS_KLDFIRSTMOD", Const, 0, ""}, + {"SYS_KLDLOAD", Const, 0, ""}, + {"SYS_KLDNEXT", Const, 0, ""}, + {"SYS_KLDSTAT", Const, 0, ""}, + {"SYS_KLDSYM", Const, 0, ""}, + {"SYS_KLDUNLOAD", Const, 0, ""}, + {"SYS_KLDUNLOADF", Const, 0, ""}, + {"SYS_KMQ_NOTIFY", Const, 14, ""}, + {"SYS_KMQ_OPEN", Const, 14, ""}, + {"SYS_KMQ_SETATTR", Const, 14, ""}, + {"SYS_KMQ_TIMEDRECEIVE", Const, 14, ""}, + {"SYS_KMQ_TIMEDSEND", Const, 14, ""}, + {"SYS_KMQ_UNLINK", Const, 14, ""}, + {"SYS_KQUEUE", Const, 0, ""}, + {"SYS_KQUEUE1", Const, 1, ""}, + {"SYS_KSEM_CLOSE", Const, 14, ""}, + {"SYS_KSEM_DESTROY", Const, 14, ""}, + {"SYS_KSEM_GETVALUE", Const, 14, ""}, + {"SYS_KSEM_INIT", Const, 14, ""}, + {"SYS_KSEM_OPEN", Const, 14, ""}, + {"SYS_KSEM_POST", Const, 14, ""}, + {"SYS_KSEM_TIMEDWAIT", Const, 14, ""}, + {"SYS_KSEM_TRYWAIT", Const, 14, ""}, + {"SYS_KSEM_UNLINK", Const, 14, ""}, + {"SYS_KSEM_WAIT", Const, 14, ""}, + {"SYS_KTIMER_CREATE", Const, 0, ""}, + {"SYS_KTIMER_DELETE", Const, 0, ""}, + {"SYS_KTIMER_GETOVERRUN", Const, 0, ""}, + {"SYS_KTIMER_GETTIME", Const, 0, ""}, + {"SYS_KTIMER_SETTIME", Const, 0, ""}, + {"SYS_KTRACE", Const, 0, ""}, + {"SYS_LCHFLAGS", Const, 0, ""}, + {"SYS_LCHMOD", Const, 0, ""}, + {"SYS_LCHOWN", Const, 0, ""}, + {"SYS_LCHOWN32", Const, 0, ""}, + {"SYS_LEDGER", Const, 16, ""}, + {"SYS_LGETFH", Const, 0, ""}, + {"SYS_LGETXATTR", Const, 0, ""}, + {"SYS_LINK", Const, 0, ""}, + {"SYS_LINKAT", Const, 0, ""}, + {"SYS_LIO_LISTIO", Const, 0, ""}, + {"SYS_LISTEN", Const, 0, ""}, + {"SYS_LISTXATTR", Const, 0, ""}, + {"SYS_LLISTXATTR", Const, 0, ""}, + {"SYS_LOCK", Const, 0, ""}, + {"SYS_LOOKUP_DCOOKIE", Const, 0, ""}, + {"SYS_LPATHCONF", Const, 0, ""}, + {"SYS_LREMOVEXATTR", Const, 0, ""}, + {"SYS_LSEEK", Const, 0, ""}, + {"SYS_LSETXATTR", Const, 0, ""}, + 
{"SYS_LSTAT", Const, 0, ""}, + {"SYS_LSTAT64", Const, 0, ""}, + {"SYS_LSTAT64_EXTENDED", Const, 0, ""}, + {"SYS_LSTATV", Const, 0, ""}, + {"SYS_LSTAT_EXTENDED", Const, 0, ""}, + {"SYS_LUTIMES", Const, 0, ""}, + {"SYS_MAC_SYSCALL", Const, 0, ""}, + {"SYS_MADVISE", Const, 0, ""}, + {"SYS_MADVISE1", Const, 0, ""}, + {"SYS_MAXSYSCALL", Const, 0, ""}, + {"SYS_MBIND", Const, 0, ""}, + {"SYS_MIGRATE_PAGES", Const, 0, ""}, + {"SYS_MINCORE", Const, 0, ""}, + {"SYS_MINHERIT", Const, 0, ""}, + {"SYS_MKCOMPLEX", Const, 0, ""}, + {"SYS_MKDIR", Const, 0, ""}, + {"SYS_MKDIRAT", Const, 0, ""}, + {"SYS_MKDIR_EXTENDED", Const, 0, ""}, + {"SYS_MKFIFO", Const, 0, ""}, + {"SYS_MKFIFOAT", Const, 0, ""}, + {"SYS_MKFIFO_EXTENDED", Const, 0, ""}, + {"SYS_MKNOD", Const, 0, ""}, + {"SYS_MKNODAT", Const, 0, ""}, + {"SYS_MLOCK", Const, 0, ""}, + {"SYS_MLOCKALL", Const, 0, ""}, + {"SYS_MMAP", Const, 0, ""}, + {"SYS_MMAP2", Const, 0, ""}, + {"SYS_MODCTL", Const, 1, ""}, + {"SYS_MODFIND", Const, 0, ""}, + {"SYS_MODFNEXT", Const, 0, ""}, + {"SYS_MODIFY_LDT", Const, 0, ""}, + {"SYS_MODNEXT", Const, 0, ""}, + {"SYS_MODSTAT", Const, 0, ""}, + {"SYS_MODWATCH", Const, 0, ""}, + {"SYS_MOUNT", Const, 0, ""}, + {"SYS_MOVE_PAGES", Const, 0, ""}, + {"SYS_MPROTECT", Const, 0, ""}, + {"SYS_MPX", Const, 0, ""}, + {"SYS_MQUERY", Const, 1, ""}, + {"SYS_MQ_GETSETATTR", Const, 0, ""}, + {"SYS_MQ_NOTIFY", Const, 0, ""}, + {"SYS_MQ_OPEN", Const, 0, ""}, + {"SYS_MQ_TIMEDRECEIVE", Const, 0, ""}, + {"SYS_MQ_TIMEDSEND", Const, 0, ""}, + {"SYS_MQ_UNLINK", Const, 0, ""}, + {"SYS_MREMAP", Const, 0, ""}, + {"SYS_MSGCTL", Const, 0, ""}, + {"SYS_MSGGET", Const, 0, ""}, + {"SYS_MSGRCV", Const, 0, ""}, + {"SYS_MSGRCV_NOCANCEL", Const, 0, ""}, + {"SYS_MSGSND", Const, 0, ""}, + {"SYS_MSGSND_NOCANCEL", Const, 0, ""}, + {"SYS_MSGSYS", Const, 0, ""}, + {"SYS_MSYNC", Const, 0, ""}, + {"SYS_MSYNC_NOCANCEL", Const, 0, ""}, + {"SYS_MUNLOCK", Const, 0, ""}, + {"SYS_MUNLOCKALL", Const, 0, ""}, + {"SYS_MUNMAP", Const, 0, ""}, + {"SYS_NAME_TO_HANDLE_AT", Const, 0, ""}, + {"SYS_NANOSLEEP", Const, 0, ""}, + {"SYS_NEWFSTATAT", Const, 0, ""}, + {"SYS_NFSCLNT", Const, 0, ""}, + {"SYS_NFSSERVCTL", Const, 0, ""}, + {"SYS_NFSSVC", Const, 0, ""}, + {"SYS_NFSTAT", Const, 0, ""}, + {"SYS_NICE", Const, 0, ""}, + {"SYS_NLM_SYSCALL", Const, 14, ""}, + {"SYS_NLSTAT", Const, 0, ""}, + {"SYS_NMOUNT", Const, 0, ""}, + {"SYS_NSTAT", Const, 0, ""}, + {"SYS_NTP_ADJTIME", Const, 0, ""}, + {"SYS_NTP_GETTIME", Const, 0, ""}, + {"SYS_NUMA_GETAFFINITY", Const, 14, ""}, + {"SYS_NUMA_SETAFFINITY", Const, 14, ""}, + {"SYS_OABI_SYSCALL_BASE", Const, 0, ""}, + {"SYS_OBREAK", Const, 0, ""}, + {"SYS_OLDFSTAT", Const, 0, ""}, + {"SYS_OLDLSTAT", Const, 0, ""}, + {"SYS_OLDOLDUNAME", Const, 0, ""}, + {"SYS_OLDSTAT", Const, 0, ""}, + {"SYS_OLDUNAME", Const, 0, ""}, + {"SYS_OPEN", Const, 0, ""}, + {"SYS_OPENAT", Const, 0, ""}, + {"SYS_OPENBSD_POLL", Const, 0, ""}, + {"SYS_OPEN_BY_HANDLE_AT", Const, 0, ""}, + {"SYS_OPEN_DPROTECTED_NP", Const, 16, ""}, + {"SYS_OPEN_EXTENDED", Const, 0, ""}, + {"SYS_OPEN_NOCANCEL", Const, 0, ""}, + {"SYS_OVADVISE", Const, 0, ""}, + {"SYS_PACCEPT", Const, 1, ""}, + {"SYS_PATHCONF", Const, 0, ""}, + {"SYS_PAUSE", Const, 0, ""}, + {"SYS_PCICONFIG_IOBASE", Const, 0, ""}, + {"SYS_PCICONFIG_READ", Const, 0, ""}, + {"SYS_PCICONFIG_WRITE", Const, 0, ""}, + {"SYS_PDFORK", Const, 0, ""}, + {"SYS_PDGETPID", Const, 0, ""}, + {"SYS_PDKILL", Const, 0, ""}, + {"SYS_PERF_EVENT_OPEN", Const, 0, ""}, + {"SYS_PERSONALITY", Const, 0, ""}, + {"SYS_PID_HIBERNATE", Const, 0, ""}, + 
{"SYS_PID_RESUME", Const, 0, ""}, + {"SYS_PID_SHUTDOWN_SOCKETS", Const, 0, ""}, + {"SYS_PID_SUSPEND", Const, 0, ""}, + {"SYS_PIPE", Const, 0, ""}, + {"SYS_PIPE2", Const, 0, ""}, + {"SYS_PIVOT_ROOT", Const, 0, ""}, + {"SYS_PMC_CONTROL", Const, 1, ""}, + {"SYS_PMC_GET_INFO", Const, 1, ""}, + {"SYS_POLL", Const, 0, ""}, + {"SYS_POLLTS", Const, 1, ""}, + {"SYS_POLL_NOCANCEL", Const, 0, ""}, + {"SYS_POSIX_FADVISE", Const, 0, ""}, + {"SYS_POSIX_FALLOCATE", Const, 0, ""}, + {"SYS_POSIX_OPENPT", Const, 0, ""}, + {"SYS_POSIX_SPAWN", Const, 0, ""}, + {"SYS_PPOLL", Const, 0, ""}, + {"SYS_PRCTL", Const, 0, ""}, + {"SYS_PREAD", Const, 0, ""}, + {"SYS_PREAD64", Const, 0, ""}, + {"SYS_PREADV", Const, 0, ""}, + {"SYS_PREAD_NOCANCEL", Const, 0, ""}, + {"SYS_PRLIMIT64", Const, 0, ""}, + {"SYS_PROCCTL", Const, 3, ""}, + {"SYS_PROCESS_POLICY", Const, 0, ""}, + {"SYS_PROCESS_VM_READV", Const, 0, ""}, + {"SYS_PROCESS_VM_WRITEV", Const, 0, ""}, + {"SYS_PROC_INFO", Const, 0, ""}, + {"SYS_PROF", Const, 0, ""}, + {"SYS_PROFIL", Const, 0, ""}, + {"SYS_PSELECT", Const, 0, ""}, + {"SYS_PSELECT6", Const, 0, ""}, + {"SYS_PSET_ASSIGN", Const, 1, ""}, + {"SYS_PSET_CREATE", Const, 1, ""}, + {"SYS_PSET_DESTROY", Const, 1, ""}, + {"SYS_PSYNCH_CVBROAD", Const, 0, ""}, + {"SYS_PSYNCH_CVCLRPREPOST", Const, 0, ""}, + {"SYS_PSYNCH_CVSIGNAL", Const, 0, ""}, + {"SYS_PSYNCH_CVWAIT", Const, 0, ""}, + {"SYS_PSYNCH_MUTEXDROP", Const, 0, ""}, + {"SYS_PSYNCH_MUTEXWAIT", Const, 0, ""}, + {"SYS_PSYNCH_RW_DOWNGRADE", Const, 0, ""}, + {"SYS_PSYNCH_RW_LONGRDLOCK", Const, 0, ""}, + {"SYS_PSYNCH_RW_RDLOCK", Const, 0, ""}, + {"SYS_PSYNCH_RW_UNLOCK", Const, 0, ""}, + {"SYS_PSYNCH_RW_UNLOCK2", Const, 0, ""}, + {"SYS_PSYNCH_RW_UPGRADE", Const, 0, ""}, + {"SYS_PSYNCH_RW_WRLOCK", Const, 0, ""}, + {"SYS_PSYNCH_RW_YIELDWRLOCK", Const, 0, ""}, + {"SYS_PTRACE", Const, 0, ""}, + {"SYS_PUTPMSG", Const, 0, ""}, + {"SYS_PWRITE", Const, 0, ""}, + {"SYS_PWRITE64", Const, 0, ""}, + {"SYS_PWRITEV", Const, 0, ""}, + {"SYS_PWRITE_NOCANCEL", Const, 0, ""}, + {"SYS_QUERY_MODULE", Const, 0, ""}, + {"SYS_QUOTACTL", Const, 0, ""}, + {"SYS_RASCTL", Const, 1, ""}, + {"SYS_RCTL_ADD_RULE", Const, 0, ""}, + {"SYS_RCTL_GET_LIMITS", Const, 0, ""}, + {"SYS_RCTL_GET_RACCT", Const, 0, ""}, + {"SYS_RCTL_GET_RULES", Const, 0, ""}, + {"SYS_RCTL_REMOVE_RULE", Const, 0, ""}, + {"SYS_READ", Const, 0, ""}, + {"SYS_READAHEAD", Const, 0, ""}, + {"SYS_READDIR", Const, 0, ""}, + {"SYS_READLINK", Const, 0, ""}, + {"SYS_READLINKAT", Const, 0, ""}, + {"SYS_READV", Const, 0, ""}, + {"SYS_READV_NOCANCEL", Const, 0, ""}, + {"SYS_READ_NOCANCEL", Const, 0, ""}, + {"SYS_REBOOT", Const, 0, ""}, + {"SYS_RECV", Const, 0, ""}, + {"SYS_RECVFROM", Const, 0, ""}, + {"SYS_RECVFROM_NOCANCEL", Const, 0, ""}, + {"SYS_RECVMMSG", Const, 0, ""}, + {"SYS_RECVMSG", Const, 0, ""}, + {"SYS_RECVMSG_NOCANCEL", Const, 0, ""}, + {"SYS_REMAP_FILE_PAGES", Const, 0, ""}, + {"SYS_REMOVEXATTR", Const, 0, ""}, + {"SYS_RENAME", Const, 0, ""}, + {"SYS_RENAMEAT", Const, 0, ""}, + {"SYS_REQUEST_KEY", Const, 0, ""}, + {"SYS_RESTART_SYSCALL", Const, 0, ""}, + {"SYS_REVOKE", Const, 0, ""}, + {"SYS_RFORK", Const, 0, ""}, + {"SYS_RMDIR", Const, 0, ""}, + {"SYS_RTPRIO", Const, 0, ""}, + {"SYS_RTPRIO_THREAD", Const, 0, ""}, + {"SYS_RT_SIGACTION", Const, 0, ""}, + {"SYS_RT_SIGPENDING", Const, 0, ""}, + {"SYS_RT_SIGPROCMASK", Const, 0, ""}, + {"SYS_RT_SIGQUEUEINFO", Const, 0, ""}, + {"SYS_RT_SIGRETURN", Const, 0, ""}, + {"SYS_RT_SIGSUSPEND", Const, 0, ""}, + {"SYS_RT_SIGTIMEDWAIT", Const, 0, ""}, + {"SYS_RT_TGSIGQUEUEINFO", Const, 0, ""}, 
+ {"SYS_SBRK", Const, 0, ""}, + {"SYS_SCHED_GETAFFINITY", Const, 0, ""}, + {"SYS_SCHED_GETPARAM", Const, 0, ""}, + {"SYS_SCHED_GETSCHEDULER", Const, 0, ""}, + {"SYS_SCHED_GET_PRIORITY_MAX", Const, 0, ""}, + {"SYS_SCHED_GET_PRIORITY_MIN", Const, 0, ""}, + {"SYS_SCHED_RR_GET_INTERVAL", Const, 0, ""}, + {"SYS_SCHED_SETAFFINITY", Const, 0, ""}, + {"SYS_SCHED_SETPARAM", Const, 0, ""}, + {"SYS_SCHED_SETSCHEDULER", Const, 0, ""}, + {"SYS_SCHED_YIELD", Const, 0, ""}, + {"SYS_SCTP_GENERIC_RECVMSG", Const, 0, ""}, + {"SYS_SCTP_GENERIC_SENDMSG", Const, 0, ""}, + {"SYS_SCTP_GENERIC_SENDMSG_IOV", Const, 0, ""}, + {"SYS_SCTP_PEELOFF", Const, 0, ""}, + {"SYS_SEARCHFS", Const, 0, ""}, + {"SYS_SECURITY", Const, 0, ""}, + {"SYS_SELECT", Const, 0, ""}, + {"SYS_SELECT_NOCANCEL", Const, 0, ""}, + {"SYS_SEMCONFIG", Const, 1, ""}, + {"SYS_SEMCTL", Const, 0, ""}, + {"SYS_SEMGET", Const, 0, ""}, + {"SYS_SEMOP", Const, 0, ""}, + {"SYS_SEMSYS", Const, 0, ""}, + {"SYS_SEMTIMEDOP", Const, 0, ""}, + {"SYS_SEM_CLOSE", Const, 0, ""}, + {"SYS_SEM_DESTROY", Const, 0, ""}, + {"SYS_SEM_GETVALUE", Const, 0, ""}, + {"SYS_SEM_INIT", Const, 0, ""}, + {"SYS_SEM_OPEN", Const, 0, ""}, + {"SYS_SEM_POST", Const, 0, ""}, + {"SYS_SEM_TRYWAIT", Const, 0, ""}, + {"SYS_SEM_UNLINK", Const, 0, ""}, + {"SYS_SEM_WAIT", Const, 0, ""}, + {"SYS_SEM_WAIT_NOCANCEL", Const, 0, ""}, + {"SYS_SEND", Const, 0, ""}, + {"SYS_SENDFILE", Const, 0, ""}, + {"SYS_SENDFILE64", Const, 0, ""}, + {"SYS_SENDMMSG", Const, 0, ""}, + {"SYS_SENDMSG", Const, 0, ""}, + {"SYS_SENDMSG_NOCANCEL", Const, 0, ""}, + {"SYS_SENDTO", Const, 0, ""}, + {"SYS_SENDTO_NOCANCEL", Const, 0, ""}, + {"SYS_SETATTRLIST", Const, 0, ""}, + {"SYS_SETAUDIT", Const, 0, ""}, + {"SYS_SETAUDIT_ADDR", Const, 0, ""}, + {"SYS_SETAUID", Const, 0, ""}, + {"SYS_SETCONTEXT", Const, 0, ""}, + {"SYS_SETDOMAINNAME", Const, 0, ""}, + {"SYS_SETEGID", Const, 0, ""}, + {"SYS_SETEUID", Const, 0, ""}, + {"SYS_SETFIB", Const, 0, ""}, + {"SYS_SETFSGID", Const, 0, ""}, + {"SYS_SETFSGID32", Const, 0, ""}, + {"SYS_SETFSUID", Const, 0, ""}, + {"SYS_SETFSUID32", Const, 0, ""}, + {"SYS_SETGID", Const, 0, ""}, + {"SYS_SETGID32", Const, 0, ""}, + {"SYS_SETGROUPS", Const, 0, ""}, + {"SYS_SETGROUPS32", Const, 0, ""}, + {"SYS_SETHOSTNAME", Const, 0, ""}, + {"SYS_SETITIMER", Const, 0, ""}, + {"SYS_SETLCID", Const, 0, ""}, + {"SYS_SETLOGIN", Const, 0, ""}, + {"SYS_SETLOGINCLASS", Const, 0, ""}, + {"SYS_SETNS", Const, 0, ""}, + {"SYS_SETPGID", Const, 0, ""}, + {"SYS_SETPRIORITY", Const, 0, ""}, + {"SYS_SETPRIVEXEC", Const, 0, ""}, + {"SYS_SETREGID", Const, 0, ""}, + {"SYS_SETREGID32", Const, 0, ""}, + {"SYS_SETRESGID", Const, 0, ""}, + {"SYS_SETRESGID32", Const, 0, ""}, + {"SYS_SETRESUID", Const, 0, ""}, + {"SYS_SETRESUID32", Const, 0, ""}, + {"SYS_SETREUID", Const, 0, ""}, + {"SYS_SETREUID32", Const, 0, ""}, + {"SYS_SETRLIMIT", Const, 0, ""}, + {"SYS_SETRTABLE", Const, 1, ""}, + {"SYS_SETSGROUPS", Const, 0, ""}, + {"SYS_SETSID", Const, 0, ""}, + {"SYS_SETSOCKOPT", Const, 0, ""}, + {"SYS_SETTID", Const, 0, ""}, + {"SYS_SETTID_WITH_PID", Const, 0, ""}, + {"SYS_SETTIMEOFDAY", Const, 0, ""}, + {"SYS_SETUID", Const, 0, ""}, + {"SYS_SETUID32", Const, 0, ""}, + {"SYS_SETWGROUPS", Const, 0, ""}, + {"SYS_SETXATTR", Const, 0, ""}, + {"SYS_SET_MEMPOLICY", Const, 0, ""}, + {"SYS_SET_ROBUST_LIST", Const, 0, ""}, + {"SYS_SET_THREAD_AREA", Const, 0, ""}, + {"SYS_SET_TID_ADDRESS", Const, 0, ""}, + {"SYS_SGETMASK", Const, 0, ""}, + {"SYS_SHARED_REGION_CHECK_NP", Const, 0, ""}, + {"SYS_SHARED_REGION_MAP_AND_SLIDE_NP", Const, 0, ""}, + 
{"SYS_SHMAT", Const, 0, ""}, + {"SYS_SHMCTL", Const, 0, ""}, + {"SYS_SHMDT", Const, 0, ""}, + {"SYS_SHMGET", Const, 0, ""}, + {"SYS_SHMSYS", Const, 0, ""}, + {"SYS_SHM_OPEN", Const, 0, ""}, + {"SYS_SHM_UNLINK", Const, 0, ""}, + {"SYS_SHUTDOWN", Const, 0, ""}, + {"SYS_SIGACTION", Const, 0, ""}, + {"SYS_SIGALTSTACK", Const, 0, ""}, + {"SYS_SIGNAL", Const, 0, ""}, + {"SYS_SIGNALFD", Const, 0, ""}, + {"SYS_SIGNALFD4", Const, 0, ""}, + {"SYS_SIGPENDING", Const, 0, ""}, + {"SYS_SIGPROCMASK", Const, 0, ""}, + {"SYS_SIGQUEUE", Const, 0, ""}, + {"SYS_SIGQUEUEINFO", Const, 1, ""}, + {"SYS_SIGRETURN", Const, 0, ""}, + {"SYS_SIGSUSPEND", Const, 0, ""}, + {"SYS_SIGSUSPEND_NOCANCEL", Const, 0, ""}, + {"SYS_SIGTIMEDWAIT", Const, 0, ""}, + {"SYS_SIGWAIT", Const, 0, ""}, + {"SYS_SIGWAITINFO", Const, 0, ""}, + {"SYS_SOCKET", Const, 0, ""}, + {"SYS_SOCKETCALL", Const, 0, ""}, + {"SYS_SOCKETPAIR", Const, 0, ""}, + {"SYS_SPLICE", Const, 0, ""}, + {"SYS_SSETMASK", Const, 0, ""}, + {"SYS_SSTK", Const, 0, ""}, + {"SYS_STACK_SNAPSHOT", Const, 0, ""}, + {"SYS_STAT", Const, 0, ""}, + {"SYS_STAT64", Const, 0, ""}, + {"SYS_STAT64_EXTENDED", Const, 0, ""}, + {"SYS_STATFS", Const, 0, ""}, + {"SYS_STATFS64", Const, 0, ""}, + {"SYS_STATV", Const, 0, ""}, + {"SYS_STATVFS1", Const, 1, ""}, + {"SYS_STAT_EXTENDED", Const, 0, ""}, + {"SYS_STIME", Const, 0, ""}, + {"SYS_STTY", Const, 0, ""}, + {"SYS_SWAPCONTEXT", Const, 0, ""}, + {"SYS_SWAPCTL", Const, 1, ""}, + {"SYS_SWAPOFF", Const, 0, ""}, + {"SYS_SWAPON", Const, 0, ""}, + {"SYS_SYMLINK", Const, 0, ""}, + {"SYS_SYMLINKAT", Const, 0, ""}, + {"SYS_SYNC", Const, 0, ""}, + {"SYS_SYNCFS", Const, 0, ""}, + {"SYS_SYNC_FILE_RANGE", Const, 0, ""}, + {"SYS_SYSARCH", Const, 0, ""}, + {"SYS_SYSCALL", Const, 0, ""}, + {"SYS_SYSCALL_BASE", Const, 0, ""}, + {"SYS_SYSFS", Const, 0, ""}, + {"SYS_SYSINFO", Const, 0, ""}, + {"SYS_SYSLOG", Const, 0, ""}, + {"SYS_TEE", Const, 0, ""}, + {"SYS_TGKILL", Const, 0, ""}, + {"SYS_THREAD_SELFID", Const, 0, ""}, + {"SYS_THR_CREATE", Const, 0, ""}, + {"SYS_THR_EXIT", Const, 0, ""}, + {"SYS_THR_KILL", Const, 0, ""}, + {"SYS_THR_KILL2", Const, 0, ""}, + {"SYS_THR_NEW", Const, 0, ""}, + {"SYS_THR_SELF", Const, 0, ""}, + {"SYS_THR_SET_NAME", Const, 0, ""}, + {"SYS_THR_SUSPEND", Const, 0, ""}, + {"SYS_THR_WAKE", Const, 0, ""}, + {"SYS_TIME", Const, 0, ""}, + {"SYS_TIMERFD_CREATE", Const, 0, ""}, + {"SYS_TIMERFD_GETTIME", Const, 0, ""}, + {"SYS_TIMERFD_SETTIME", Const, 0, ""}, + {"SYS_TIMER_CREATE", Const, 0, ""}, + {"SYS_TIMER_DELETE", Const, 0, ""}, + {"SYS_TIMER_GETOVERRUN", Const, 0, ""}, + {"SYS_TIMER_GETTIME", Const, 0, ""}, + {"SYS_TIMER_SETTIME", Const, 0, ""}, + {"SYS_TIMES", Const, 0, ""}, + {"SYS_TKILL", Const, 0, ""}, + {"SYS_TRUNCATE", Const, 0, ""}, + {"SYS_TRUNCATE64", Const, 0, ""}, + {"SYS_TUXCALL", Const, 0, ""}, + {"SYS_UGETRLIMIT", Const, 0, ""}, + {"SYS_ULIMIT", Const, 0, ""}, + {"SYS_UMASK", Const, 0, ""}, + {"SYS_UMASK_EXTENDED", Const, 0, ""}, + {"SYS_UMOUNT", Const, 0, ""}, + {"SYS_UMOUNT2", Const, 0, ""}, + {"SYS_UNAME", Const, 0, ""}, + {"SYS_UNDELETE", Const, 0, ""}, + {"SYS_UNLINK", Const, 0, ""}, + {"SYS_UNLINKAT", Const, 0, ""}, + {"SYS_UNMOUNT", Const, 0, ""}, + {"SYS_UNSHARE", Const, 0, ""}, + {"SYS_USELIB", Const, 0, ""}, + {"SYS_USTAT", Const, 0, ""}, + {"SYS_UTIME", Const, 0, ""}, + {"SYS_UTIMENSAT", Const, 0, ""}, + {"SYS_UTIMES", Const, 0, ""}, + {"SYS_UTRACE", Const, 0, ""}, + {"SYS_UUIDGEN", Const, 0, ""}, + {"SYS_VADVISE", Const, 1, ""}, + {"SYS_VFORK", Const, 0, ""}, + {"SYS_VHANGUP", Const, 0, ""}, + {"SYS_VM86", 
Const, 0, ""}, + {"SYS_VM86OLD", Const, 0, ""}, + {"SYS_VMSPLICE", Const, 0, ""}, + {"SYS_VM_PRESSURE_MONITOR", Const, 0, ""}, + {"SYS_VSERVER", Const, 0, ""}, + {"SYS_WAIT4", Const, 0, ""}, + {"SYS_WAIT4_NOCANCEL", Const, 0, ""}, + {"SYS_WAIT6", Const, 1, ""}, + {"SYS_WAITEVENT", Const, 0, ""}, + {"SYS_WAITID", Const, 0, ""}, + {"SYS_WAITID_NOCANCEL", Const, 0, ""}, + {"SYS_WAITPID", Const, 0, ""}, + {"SYS_WATCHEVENT", Const, 0, ""}, + {"SYS_WORKQ_KERNRETURN", Const, 0, ""}, + {"SYS_WORKQ_OPEN", Const, 0, ""}, + {"SYS_WRITE", Const, 0, ""}, + {"SYS_WRITEV", Const, 0, ""}, + {"SYS_WRITEV_NOCANCEL", Const, 0, ""}, + {"SYS_WRITE_NOCANCEL", Const, 0, ""}, + {"SYS_YIELD", Const, 0, ""}, + {"SYS__LLSEEK", Const, 0, ""}, + {"SYS__LWP_CONTINUE", Const, 1, ""}, + {"SYS__LWP_CREATE", Const, 1, ""}, + {"SYS__LWP_CTL", Const, 1, ""}, + {"SYS__LWP_DETACH", Const, 1, ""}, + {"SYS__LWP_EXIT", Const, 1, ""}, + {"SYS__LWP_GETNAME", Const, 1, ""}, + {"SYS__LWP_GETPRIVATE", Const, 1, ""}, + {"SYS__LWP_KILL", Const, 1, ""}, + {"SYS__LWP_PARK", Const, 1, ""}, + {"SYS__LWP_SELF", Const, 1, ""}, + {"SYS__LWP_SETNAME", Const, 1, ""}, + {"SYS__LWP_SETPRIVATE", Const, 1, ""}, + {"SYS__LWP_SUSPEND", Const, 1, ""}, + {"SYS__LWP_UNPARK", Const, 1, ""}, + {"SYS__LWP_UNPARK_ALL", Const, 1, ""}, + {"SYS__LWP_WAIT", Const, 1, ""}, + {"SYS__LWP_WAKEUP", Const, 1, ""}, + {"SYS__NEWSELECT", Const, 0, ""}, + {"SYS__PSET_BIND", Const, 1, ""}, + {"SYS__SCHED_GETAFFINITY", Const, 1, ""}, + {"SYS__SCHED_GETPARAM", Const, 1, ""}, + {"SYS__SCHED_SETAFFINITY", Const, 1, ""}, + {"SYS__SCHED_SETPARAM", Const, 1, ""}, + {"SYS__SYSCTL", Const, 0, ""}, + {"SYS__UMTX_LOCK", Const, 0, ""}, + {"SYS__UMTX_OP", Const, 0, ""}, + {"SYS__UMTX_UNLOCK", Const, 0, ""}, + {"SYS___ACL_ACLCHECK_FD", Const, 0, ""}, + {"SYS___ACL_ACLCHECK_FILE", Const, 0, ""}, + {"SYS___ACL_ACLCHECK_LINK", Const, 0, ""}, + {"SYS___ACL_DELETE_FD", Const, 0, ""}, + {"SYS___ACL_DELETE_FILE", Const, 0, ""}, + {"SYS___ACL_DELETE_LINK", Const, 0, ""}, + {"SYS___ACL_GET_FD", Const, 0, ""}, + {"SYS___ACL_GET_FILE", Const, 0, ""}, + {"SYS___ACL_GET_LINK", Const, 0, ""}, + {"SYS___ACL_SET_FD", Const, 0, ""}, + {"SYS___ACL_SET_FILE", Const, 0, ""}, + {"SYS___ACL_SET_LINK", Const, 0, ""}, + {"SYS___CAP_RIGHTS_GET", Const, 14, ""}, + {"SYS___CLONE", Const, 1, ""}, + {"SYS___DISABLE_THREADSIGNAL", Const, 0, ""}, + {"SYS___GETCWD", Const, 0, ""}, + {"SYS___GETLOGIN", Const, 1, ""}, + {"SYS___GET_TCB", Const, 1, ""}, + {"SYS___MAC_EXECVE", Const, 0, ""}, + {"SYS___MAC_GETFSSTAT", Const, 0, ""}, + {"SYS___MAC_GET_FD", Const, 0, ""}, + {"SYS___MAC_GET_FILE", Const, 0, ""}, + {"SYS___MAC_GET_LCID", Const, 0, ""}, + {"SYS___MAC_GET_LCTX", Const, 0, ""}, + {"SYS___MAC_GET_LINK", Const, 0, ""}, + {"SYS___MAC_GET_MOUNT", Const, 0, ""}, + {"SYS___MAC_GET_PID", Const, 0, ""}, + {"SYS___MAC_GET_PROC", Const, 0, ""}, + {"SYS___MAC_MOUNT", Const, 0, ""}, + {"SYS___MAC_SET_FD", Const, 0, ""}, + {"SYS___MAC_SET_FILE", Const, 0, ""}, + {"SYS___MAC_SET_LCTX", Const, 0, ""}, + {"SYS___MAC_SET_LINK", Const, 0, ""}, + {"SYS___MAC_SET_PROC", Const, 0, ""}, + {"SYS___MAC_SYSCALL", Const, 0, ""}, + {"SYS___OLD_SEMWAIT_SIGNAL", Const, 0, ""}, + {"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0, ""}, + {"SYS___POSIX_CHOWN", Const, 1, ""}, + {"SYS___POSIX_FCHOWN", Const, 1, ""}, + {"SYS___POSIX_LCHOWN", Const, 1, ""}, + {"SYS___POSIX_RENAME", Const, 1, ""}, + {"SYS___PTHREAD_CANCELED", Const, 0, ""}, + {"SYS___PTHREAD_CHDIR", Const, 0, ""}, + {"SYS___PTHREAD_FCHDIR", Const, 0, ""}, + {"SYS___PTHREAD_KILL", 
Const, 0, ""}, + {"SYS___PTHREAD_MARKCANCEL", Const, 0, ""}, + {"SYS___PTHREAD_SIGMASK", Const, 0, ""}, + {"SYS___QUOTACTL", Const, 1, ""}, + {"SYS___SEMCTL", Const, 1, ""}, + {"SYS___SEMWAIT_SIGNAL", Const, 0, ""}, + {"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0, ""}, + {"SYS___SETLOGIN", Const, 1, ""}, + {"SYS___SETUGID", Const, 0, ""}, + {"SYS___SET_TCB", Const, 1, ""}, + {"SYS___SIGACTION_SIGTRAMP", Const, 1, ""}, + {"SYS___SIGTIMEDWAIT", Const, 1, ""}, + {"SYS___SIGWAIT", Const, 0, ""}, + {"SYS___SIGWAIT_NOCANCEL", Const, 0, ""}, + {"SYS___SYSCTL", Const, 0, ""}, + {"SYS___TFORK", Const, 1, ""}, + {"SYS___THREXIT", Const, 1, ""}, + {"SYS___THRSIGDIVERT", Const, 1, ""}, + {"SYS___THRSLEEP", Const, 1, ""}, + {"SYS___THRWAKEUP", Const, 1, ""}, + {"S_ARCH1", Const, 1, ""}, + {"S_ARCH2", Const, 1, ""}, + {"S_BLKSIZE", Const, 0, ""}, + {"S_IEXEC", Const, 0, ""}, + {"S_IFBLK", Const, 0, ""}, + {"S_IFCHR", Const, 0, ""}, + {"S_IFDIR", Const, 0, ""}, + {"S_IFIFO", Const, 0, ""}, + {"S_IFLNK", Const, 0, ""}, + {"S_IFMT", Const, 0, ""}, + {"S_IFREG", Const, 0, ""}, + {"S_IFSOCK", Const, 0, ""}, + {"S_IFWHT", Const, 0, ""}, + {"S_IREAD", Const, 0, ""}, + {"S_IRGRP", Const, 0, ""}, + {"S_IROTH", Const, 0, ""}, + {"S_IRUSR", Const, 0, ""}, + {"S_IRWXG", Const, 0, ""}, + {"S_IRWXO", Const, 0, ""}, + {"S_IRWXU", Const, 0, ""}, + {"S_ISGID", Const, 0, ""}, + {"S_ISTXT", Const, 0, ""}, + {"S_ISUID", Const, 0, ""}, + {"S_ISVTX", Const, 0, ""}, + {"S_IWGRP", Const, 0, ""}, + {"S_IWOTH", Const, 0, ""}, + {"S_IWRITE", Const, 0, ""}, + {"S_IWUSR", Const, 0, ""}, + {"S_IXGRP", Const, 0, ""}, + {"S_IXOTH", Const, 0, ""}, + {"S_IXUSR", Const, 0, ""}, + {"S_LOGIN_SET", Const, 1, ""}, + {"SecurityAttributes", Type, 0, ""}, + {"SecurityAttributes.InheritHandle", Field, 0, ""}, + {"SecurityAttributes.Length", Field, 0, ""}, + {"SecurityAttributes.SecurityDescriptor", Field, 0, ""}, + {"Seek", Func, 0, "func(fd int, offset int64, whence int) (off int64, err error)"}, + {"Select", Func, 0, "func(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)"}, + {"Sendfile", Func, 0, "func(outfd int, infd int, offset *int64, count int) (written int, err error)"}, + {"Sendmsg", Func, 0, "func(fd int, p []byte, oob []byte, to Sockaddr, flags int) (err error)"}, + {"SendmsgN", Func, 3, "func(fd int, p []byte, oob []byte, to Sockaddr, flags int) (n int, err error)"}, + {"Sendto", Func, 0, "func(fd int, p []byte, flags int, to Sockaddr) (err error)"}, + {"Servent", Type, 0, ""}, + {"Servent.Aliases", Field, 0, ""}, + {"Servent.Name", Field, 0, ""}, + {"Servent.Port", Field, 0, ""}, + {"Servent.Proto", Field, 0, ""}, + {"SetBpf", Func, 0, ""}, + {"SetBpfBuflen", Func, 0, ""}, + {"SetBpfDatalink", Func, 0, ""}, + {"SetBpfHeadercmpl", Func, 0, ""}, + {"SetBpfImmediate", Func, 0, ""}, + {"SetBpfInterface", Func, 0, ""}, + {"SetBpfPromisc", Func, 0, ""}, + {"SetBpfTimeout", Func, 0, ""}, + {"SetCurrentDirectory", Func, 0, ""}, + {"SetEndOfFile", Func, 0, ""}, + {"SetEnvironmentVariable", Func, 0, ""}, + {"SetFileAttributes", Func, 0, ""}, + {"SetFileCompletionNotificationModes", Func, 2, ""}, + {"SetFilePointer", Func, 0, ""}, + {"SetFileTime", Func, 0, ""}, + {"SetHandleInformation", Func, 0, ""}, + {"SetKevent", Func, 0, ""}, + {"SetLsfPromisc", Func, 0, "func(name string, m bool) error"}, + {"SetNonblock", Func, 0, "func(fd int, nonblocking bool) (err error)"}, + {"Setdomainname", Func, 0, "func(p []byte) (err error)"}, + {"Setegid", Func, 0, "func(egid int) (err error)"}, + {"Setenv", Func, 0, "func(key string, 
value string) error"}, + {"Seteuid", Func, 0, "func(euid int) (err error)"}, + {"Setfsgid", Func, 0, "func(gid int) (err error)"}, + {"Setfsuid", Func, 0, "func(uid int) (err error)"}, + {"Setgid", Func, 0, "func(gid int) (err error)"}, + {"Setgroups", Func, 0, "func(gids []int) (err error)"}, + {"Sethostname", Func, 0, "func(p []byte) (err error)"}, + {"Setlogin", Func, 0, ""}, + {"Setpgid", Func, 0, "func(pid int, pgid int) (err error)"}, + {"Setpriority", Func, 0, "func(which int, who int, prio int) (err error)"}, + {"Setprivexec", Func, 0, ""}, + {"Setregid", Func, 0, "func(rgid int, egid int) (err error)"}, + {"Setresgid", Func, 0, "func(rgid int, egid int, sgid int) (err error)"}, + {"Setresuid", Func, 0, "func(ruid int, euid int, suid int) (err error)"}, + {"Setreuid", Func, 0, "func(ruid int, euid int) (err error)"}, + {"Setrlimit", Func, 0, "func(resource int, rlim *Rlimit) error"}, + {"Setsid", Func, 0, "func() (pid int, err error)"}, + {"Setsockopt", Func, 0, ""}, + {"SetsockoptByte", Func, 0, "func(fd int, level int, opt int, value byte) (err error)"}, + {"SetsockoptICMPv6Filter", Func, 2, "func(fd int, level int, opt int, filter *ICMPv6Filter) error"}, + {"SetsockoptIPMreq", Func, 0, "func(fd int, level int, opt int, mreq *IPMreq) (err error)"}, + {"SetsockoptIPMreqn", Func, 0, "func(fd int, level int, opt int, mreq *IPMreqn) (err error)"}, + {"SetsockoptIPv6Mreq", Func, 0, "func(fd int, level int, opt int, mreq *IPv6Mreq) (err error)"}, + {"SetsockoptInet4Addr", Func, 0, "func(fd int, level int, opt int, value [4]byte) (err error)"}, + {"SetsockoptInt", Func, 0, "func(fd int, level int, opt int, value int) (err error)"}, + {"SetsockoptLinger", Func, 0, "func(fd int, level int, opt int, l *Linger) (err error)"}, + {"SetsockoptString", Func, 0, "func(fd int, level int, opt int, s string) (err error)"}, + {"SetsockoptTimeval", Func, 0, "func(fd int, level int, opt int, tv *Timeval) (err error)"}, + {"Settimeofday", Func, 0, "func(tv *Timeval) (err error)"}, + {"Setuid", Func, 0, "func(uid int) (err error)"}, + {"Setxattr", Func, 1, "func(path string, attr string, data []byte, flags int) (err error)"}, + {"Shutdown", Func, 0, "func(fd int, how int) (err error)"}, + {"SidTypeAlias", Const, 0, ""}, + {"SidTypeComputer", Const, 0, ""}, + {"SidTypeDeletedAccount", Const, 0, ""}, + {"SidTypeDomain", Const, 0, ""}, + {"SidTypeGroup", Const, 0, ""}, + {"SidTypeInvalid", Const, 0, ""}, + {"SidTypeLabel", Const, 0, ""}, + {"SidTypeUnknown", Const, 0, ""}, + {"SidTypeUser", Const, 0, ""}, + {"SidTypeWellKnownGroup", Const, 0, ""}, + {"Signal", Type, 0, ""}, + {"SizeofBpfHdr", Const, 0, ""}, + {"SizeofBpfInsn", Const, 0, ""}, + {"SizeofBpfProgram", Const, 0, ""}, + {"SizeofBpfStat", Const, 0, ""}, + {"SizeofBpfVersion", Const, 0, ""}, + {"SizeofBpfZbuf", Const, 0, ""}, + {"SizeofBpfZbufHeader", Const, 0, ""}, + {"SizeofCmsghdr", Const, 0, ""}, + {"SizeofICMPv6Filter", Const, 2, ""}, + {"SizeofIPMreq", Const, 0, ""}, + {"SizeofIPMreqn", Const, 0, ""}, + {"SizeofIPv6MTUInfo", Const, 2, ""}, + {"SizeofIPv6Mreq", Const, 0, ""}, + {"SizeofIfAddrmsg", Const, 0, ""}, + {"SizeofIfAnnounceMsghdr", Const, 1, ""}, + {"SizeofIfData", Const, 0, ""}, + {"SizeofIfInfomsg", Const, 0, ""}, + {"SizeofIfMsghdr", Const, 0, ""}, + {"SizeofIfaMsghdr", Const, 0, ""}, + {"SizeofIfmaMsghdr", Const, 0, ""}, + {"SizeofIfmaMsghdr2", Const, 0, ""}, + {"SizeofInet4Pktinfo", Const, 0, ""}, + {"SizeofInet6Pktinfo", Const, 0, ""}, + {"SizeofInotifyEvent", Const, 0, ""}, + {"SizeofLinger", Const, 0, ""}, + {"SizeofMsghdr", 
Const, 0, ""}, + {"SizeofNlAttr", Const, 0, ""}, + {"SizeofNlMsgerr", Const, 0, ""}, + {"SizeofNlMsghdr", Const, 0, ""}, + {"SizeofRtAttr", Const, 0, ""}, + {"SizeofRtGenmsg", Const, 0, ""}, + {"SizeofRtMetrics", Const, 0, ""}, + {"SizeofRtMsg", Const, 0, ""}, + {"SizeofRtMsghdr", Const, 0, ""}, + {"SizeofRtNexthop", Const, 0, ""}, + {"SizeofSockFilter", Const, 0, ""}, + {"SizeofSockFprog", Const, 0, ""}, + {"SizeofSockaddrAny", Const, 0, ""}, + {"SizeofSockaddrDatalink", Const, 0, ""}, + {"SizeofSockaddrInet4", Const, 0, ""}, + {"SizeofSockaddrInet6", Const, 0, ""}, + {"SizeofSockaddrLinklayer", Const, 0, ""}, + {"SizeofSockaddrNetlink", Const, 0, ""}, + {"SizeofSockaddrUnix", Const, 0, ""}, + {"SizeofTCPInfo", Const, 1, ""}, + {"SizeofUcred", Const, 0, ""}, + {"SlicePtrFromStrings", Func, 1, "func(ss []string) ([]*byte, error)"}, + {"SockFilter", Type, 0, ""}, + {"SockFilter.Code", Field, 0, ""}, + {"SockFilter.Jf", Field, 0, ""}, + {"SockFilter.Jt", Field, 0, ""}, + {"SockFilter.K", Field, 0, ""}, + {"SockFprog", Type, 0, ""}, + {"SockFprog.Filter", Field, 0, ""}, + {"SockFprog.Len", Field, 0, ""}, + {"SockFprog.Pad_cgo_0", Field, 0, ""}, + {"Sockaddr", Type, 0, ""}, + {"SockaddrDatalink", Type, 0, ""}, + {"SockaddrDatalink.Alen", Field, 0, ""}, + {"SockaddrDatalink.Data", Field, 0, ""}, + {"SockaddrDatalink.Family", Field, 0, ""}, + {"SockaddrDatalink.Index", Field, 0, ""}, + {"SockaddrDatalink.Len", Field, 0, ""}, + {"SockaddrDatalink.Nlen", Field, 0, ""}, + {"SockaddrDatalink.Slen", Field, 0, ""}, + {"SockaddrDatalink.Type", Field, 0, ""}, + {"SockaddrGen", Type, 0, ""}, + {"SockaddrInet4", Type, 0, ""}, + {"SockaddrInet4.Addr", Field, 0, ""}, + {"SockaddrInet4.Port", Field, 0, ""}, + {"SockaddrInet6", Type, 0, ""}, + {"SockaddrInet6.Addr", Field, 0, ""}, + {"SockaddrInet6.Port", Field, 0, ""}, + {"SockaddrInet6.ZoneId", Field, 0, ""}, + {"SockaddrLinklayer", Type, 0, ""}, + {"SockaddrLinklayer.Addr", Field, 0, ""}, + {"SockaddrLinklayer.Halen", Field, 0, ""}, + {"SockaddrLinklayer.Hatype", Field, 0, ""}, + {"SockaddrLinklayer.Ifindex", Field, 0, ""}, + {"SockaddrLinklayer.Pkttype", Field, 0, ""}, + {"SockaddrLinklayer.Protocol", Field, 0, ""}, + {"SockaddrNetlink", Type, 0, ""}, + {"SockaddrNetlink.Family", Field, 0, ""}, + {"SockaddrNetlink.Groups", Field, 0, ""}, + {"SockaddrNetlink.Pad", Field, 0, ""}, + {"SockaddrNetlink.Pid", Field, 0, ""}, + {"SockaddrUnix", Type, 0, ""}, + {"SockaddrUnix.Name", Field, 0, ""}, + {"Socket", Func, 0, "func(domain int, typ int, proto int) (fd int, err error)"}, + {"SocketControlMessage", Type, 0, ""}, + {"SocketControlMessage.Data", Field, 0, ""}, + {"SocketControlMessage.Header", Field, 0, ""}, + {"SocketDisableIPv6", Var, 0, ""}, + {"Socketpair", Func, 0, "func(domain int, typ int, proto int) (fd [2]int, err error)"}, + {"Splice", Func, 0, "func(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)"}, + {"StartProcess", Func, 0, "func(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error)"}, + {"StartupInfo", Type, 0, ""}, + {"StartupInfo.Cb", Field, 0, ""}, + {"StartupInfo.Desktop", Field, 0, ""}, + {"StartupInfo.FillAttribute", Field, 0, ""}, + {"StartupInfo.Flags", Field, 0, ""}, + {"StartupInfo.ShowWindow", Field, 0, ""}, + {"StartupInfo.StdErr", Field, 0, ""}, + {"StartupInfo.StdInput", Field, 0, ""}, + {"StartupInfo.StdOutput", Field, 0, ""}, + {"StartupInfo.Title", Field, 0, ""}, + {"StartupInfo.X", Field, 0, ""}, + {"StartupInfo.XCountChars", Field, 0, ""}, + 
{"StartupInfo.XSize", Field, 0, ""}, + {"StartupInfo.Y", Field, 0, ""}, + {"StartupInfo.YCountChars", Field, 0, ""}, + {"StartupInfo.YSize", Field, 0, ""}, + {"Stat", Func, 0, "func(path string, stat *Stat_t) (err error)"}, + {"Stat_t", Type, 0, ""}, + {"Stat_t.Atim", Field, 0, ""}, + {"Stat_t.Atim_ext", Field, 12, ""}, + {"Stat_t.Atimespec", Field, 0, ""}, + {"Stat_t.Birthtimespec", Field, 0, ""}, + {"Stat_t.Blksize", Field, 0, ""}, + {"Stat_t.Blocks", Field, 0, ""}, + {"Stat_t.Btim_ext", Field, 12, ""}, + {"Stat_t.Ctim", Field, 0, ""}, + {"Stat_t.Ctim_ext", Field, 12, ""}, + {"Stat_t.Ctimespec", Field, 0, ""}, + {"Stat_t.Dev", Field, 0, ""}, + {"Stat_t.Flags", Field, 0, ""}, + {"Stat_t.Gen", Field, 0, ""}, + {"Stat_t.Gid", Field, 0, ""}, + {"Stat_t.Ino", Field, 0, ""}, + {"Stat_t.Lspare", Field, 0, ""}, + {"Stat_t.Lspare0", Field, 2, ""}, + {"Stat_t.Lspare1", Field, 2, ""}, + {"Stat_t.Mode", Field, 0, ""}, + {"Stat_t.Mtim", Field, 0, ""}, + {"Stat_t.Mtim_ext", Field, 12, ""}, + {"Stat_t.Mtimespec", Field, 0, ""}, + {"Stat_t.Nlink", Field, 0, ""}, + {"Stat_t.Pad_cgo_0", Field, 0, ""}, + {"Stat_t.Pad_cgo_1", Field, 0, ""}, + {"Stat_t.Pad_cgo_2", Field, 0, ""}, + {"Stat_t.Padding0", Field, 12, ""}, + {"Stat_t.Padding1", Field, 12, ""}, + {"Stat_t.Qspare", Field, 0, ""}, + {"Stat_t.Rdev", Field, 0, ""}, + {"Stat_t.Size", Field, 0, ""}, + {"Stat_t.Spare", Field, 2, ""}, + {"Stat_t.Uid", Field, 0, ""}, + {"Stat_t.X__pad0", Field, 0, ""}, + {"Stat_t.X__pad1", Field, 0, ""}, + {"Stat_t.X__pad2", Field, 0, ""}, + {"Stat_t.X__st_birthtim", Field, 2, ""}, + {"Stat_t.X__st_ino", Field, 0, ""}, + {"Stat_t.X__unused", Field, 0, ""}, + {"Statfs", Func, 0, "func(path string, buf *Statfs_t) (err error)"}, + {"Statfs_t", Type, 0, ""}, + {"Statfs_t.Asyncreads", Field, 0, ""}, + {"Statfs_t.Asyncwrites", Field, 0, ""}, + {"Statfs_t.Bavail", Field, 0, ""}, + {"Statfs_t.Bfree", Field, 0, ""}, + {"Statfs_t.Blocks", Field, 0, ""}, + {"Statfs_t.Bsize", Field, 0, ""}, + {"Statfs_t.Charspare", Field, 0, ""}, + {"Statfs_t.F_asyncreads", Field, 2, ""}, + {"Statfs_t.F_asyncwrites", Field, 2, ""}, + {"Statfs_t.F_bavail", Field, 2, ""}, + {"Statfs_t.F_bfree", Field, 2, ""}, + {"Statfs_t.F_blocks", Field, 2, ""}, + {"Statfs_t.F_bsize", Field, 2, ""}, + {"Statfs_t.F_ctime", Field, 2, ""}, + {"Statfs_t.F_favail", Field, 2, ""}, + {"Statfs_t.F_ffree", Field, 2, ""}, + {"Statfs_t.F_files", Field, 2, ""}, + {"Statfs_t.F_flags", Field, 2, ""}, + {"Statfs_t.F_fsid", Field, 2, ""}, + {"Statfs_t.F_fstypename", Field, 2, ""}, + {"Statfs_t.F_iosize", Field, 2, ""}, + {"Statfs_t.F_mntfromname", Field, 2, ""}, + {"Statfs_t.F_mntfromspec", Field, 3, ""}, + {"Statfs_t.F_mntonname", Field, 2, ""}, + {"Statfs_t.F_namemax", Field, 2, ""}, + {"Statfs_t.F_owner", Field, 2, ""}, + {"Statfs_t.F_spare", Field, 2, ""}, + {"Statfs_t.F_syncreads", Field, 2, ""}, + {"Statfs_t.F_syncwrites", Field, 2, ""}, + {"Statfs_t.Ffree", Field, 0, ""}, + {"Statfs_t.Files", Field, 0, ""}, + {"Statfs_t.Flags", Field, 0, ""}, + {"Statfs_t.Frsize", Field, 0, ""}, + {"Statfs_t.Fsid", Field, 0, ""}, + {"Statfs_t.Fssubtype", Field, 0, ""}, + {"Statfs_t.Fstypename", Field, 0, ""}, + {"Statfs_t.Iosize", Field, 0, ""}, + {"Statfs_t.Mntfromname", Field, 0, ""}, + {"Statfs_t.Mntonname", Field, 0, ""}, + {"Statfs_t.Mount_info", Field, 2, ""}, + {"Statfs_t.Namelen", Field, 0, ""}, + {"Statfs_t.Namemax", Field, 0, ""}, + {"Statfs_t.Owner", Field, 0, ""}, + {"Statfs_t.Pad_cgo_0", Field, 0, ""}, + {"Statfs_t.Pad_cgo_1", Field, 2, ""}, + {"Statfs_t.Reserved", Field, 0, ""}, + 
{"Statfs_t.Spare", Field, 0, ""}, + {"Statfs_t.Syncreads", Field, 0, ""}, + {"Statfs_t.Syncwrites", Field, 0, ""}, + {"Statfs_t.Type", Field, 0, ""}, + {"Statfs_t.Version", Field, 0, ""}, + {"Stderr", Var, 0, ""}, + {"Stdin", Var, 0, ""}, + {"Stdout", Var, 0, ""}, + {"StringBytePtr", Func, 0, "func(s string) *byte"}, + {"StringByteSlice", Func, 0, "func(s string) []byte"}, + {"StringSlicePtr", Func, 0, "func(ss []string) []*byte"}, + {"StringToSid", Func, 0, ""}, + {"StringToUTF16", Func, 0, ""}, + {"StringToUTF16Ptr", Func, 0, ""}, + {"Symlink", Func, 0, "func(oldpath string, newpath string) (err error)"}, + {"Sync", Func, 0, "func()"}, + {"SyncFileRange", Func, 0, "func(fd int, off int64, n int64, flags int) (err error)"}, + {"SysProcAttr", Type, 0, ""}, + {"SysProcAttr.AdditionalInheritedHandles", Field, 17, ""}, + {"SysProcAttr.AmbientCaps", Field, 9, ""}, + {"SysProcAttr.CgroupFD", Field, 20, ""}, + {"SysProcAttr.Chroot", Field, 0, ""}, + {"SysProcAttr.Cloneflags", Field, 2, ""}, + {"SysProcAttr.CmdLine", Field, 0, ""}, + {"SysProcAttr.CreationFlags", Field, 1, ""}, + {"SysProcAttr.Credential", Field, 0, ""}, + {"SysProcAttr.Ctty", Field, 1, ""}, + {"SysProcAttr.Foreground", Field, 5, ""}, + {"SysProcAttr.GidMappings", Field, 4, ""}, + {"SysProcAttr.GidMappingsEnableSetgroups", Field, 5, ""}, + {"SysProcAttr.HideWindow", Field, 0, ""}, + {"SysProcAttr.Jail", Field, 21, ""}, + {"SysProcAttr.NoInheritHandles", Field, 16, ""}, + {"SysProcAttr.Noctty", Field, 0, ""}, + {"SysProcAttr.ParentProcess", Field, 17, ""}, + {"SysProcAttr.Pdeathsig", Field, 0, ""}, + {"SysProcAttr.Pgid", Field, 5, ""}, + {"SysProcAttr.PidFD", Field, 22, ""}, + {"SysProcAttr.ProcessAttributes", Field, 13, ""}, + {"SysProcAttr.Ptrace", Field, 0, ""}, + {"SysProcAttr.Setctty", Field, 0, ""}, + {"SysProcAttr.Setpgid", Field, 0, ""}, + {"SysProcAttr.Setsid", Field, 0, ""}, + {"SysProcAttr.ThreadAttributes", Field, 13, ""}, + {"SysProcAttr.Token", Field, 10, ""}, + {"SysProcAttr.UidMappings", Field, 4, ""}, + {"SysProcAttr.Unshareflags", Field, 7, ""}, + {"SysProcAttr.UseCgroupFD", Field, 20, ""}, + {"SysProcIDMap", Type, 4, ""}, + {"SysProcIDMap.ContainerID", Field, 4, ""}, + {"SysProcIDMap.HostID", Field, 4, ""}, + {"SysProcIDMap.Size", Field, 4, ""}, + {"Syscall", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"}, + {"Syscall12", Func, 0, ""}, + {"Syscall15", Func, 0, ""}, + {"Syscall18", Func, 12, ""}, + {"Syscall6", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"}, + {"Syscall9", Func, 0, ""}, + {"SyscallN", Func, 18, ""}, + {"Sysctl", Func, 0, ""}, + {"SysctlUint32", Func, 0, ""}, + {"Sysctlnode", Type, 2, ""}, + {"Sysctlnode.Flags", Field, 2, ""}, + {"Sysctlnode.Name", Field, 2, ""}, + {"Sysctlnode.Num", Field, 2, ""}, + {"Sysctlnode.Un", Field, 2, ""}, + {"Sysctlnode.Ver", Field, 2, ""}, + {"Sysctlnode.X__rsvd", Field, 2, ""}, + {"Sysctlnode.X_sysctl_desc", Field, 2, ""}, + {"Sysctlnode.X_sysctl_func", Field, 2, ""}, + {"Sysctlnode.X_sysctl_parent", Field, 2, ""}, + {"Sysctlnode.X_sysctl_size", Field, 2, ""}, + {"Sysinfo", Func, 0, "func(info *Sysinfo_t) (err error)"}, + {"Sysinfo_t", Type, 0, ""}, + {"Sysinfo_t.Bufferram", Field, 0, ""}, + {"Sysinfo_t.Freehigh", Field, 0, ""}, + {"Sysinfo_t.Freeram", Field, 0, ""}, + {"Sysinfo_t.Freeswap", Field, 0, ""}, + {"Sysinfo_t.Loads", Field, 0, ""}, + {"Sysinfo_t.Pad", Field, 0, ""}, + {"Sysinfo_t.Pad_cgo_0", Field, 0, ""}, + 
{"Sysinfo_t.Pad_cgo_1", Field, 0, ""}, + {"Sysinfo_t.Procs", Field, 0, ""}, + {"Sysinfo_t.Sharedram", Field, 0, ""}, + {"Sysinfo_t.Totalhigh", Field, 0, ""}, + {"Sysinfo_t.Totalram", Field, 0, ""}, + {"Sysinfo_t.Totalswap", Field, 0, ""}, + {"Sysinfo_t.Unit", Field, 0, ""}, + {"Sysinfo_t.Uptime", Field, 0, ""}, + {"Sysinfo_t.X_f", Field, 0, ""}, + {"Systemtime", Type, 0, ""}, + {"Systemtime.Day", Field, 0, ""}, + {"Systemtime.DayOfWeek", Field, 0, ""}, + {"Systemtime.Hour", Field, 0, ""}, + {"Systemtime.Milliseconds", Field, 0, ""}, + {"Systemtime.Minute", Field, 0, ""}, + {"Systemtime.Month", Field, 0, ""}, + {"Systemtime.Second", Field, 0, ""}, + {"Systemtime.Year", Field, 0, ""}, + {"TCGETS", Const, 0, ""}, + {"TCIFLUSH", Const, 1, ""}, + {"TCIOFLUSH", Const, 1, ""}, + {"TCOFLUSH", Const, 1, ""}, + {"TCPInfo", Type, 1, ""}, + {"TCPInfo.Advmss", Field, 1, ""}, + {"TCPInfo.Ato", Field, 1, ""}, + {"TCPInfo.Backoff", Field, 1, ""}, + {"TCPInfo.Ca_state", Field, 1, ""}, + {"TCPInfo.Fackets", Field, 1, ""}, + {"TCPInfo.Last_ack_recv", Field, 1, ""}, + {"TCPInfo.Last_ack_sent", Field, 1, ""}, + {"TCPInfo.Last_data_recv", Field, 1, ""}, + {"TCPInfo.Last_data_sent", Field, 1, ""}, + {"TCPInfo.Lost", Field, 1, ""}, + {"TCPInfo.Options", Field, 1, ""}, + {"TCPInfo.Pad_cgo_0", Field, 1, ""}, + {"TCPInfo.Pmtu", Field, 1, ""}, + {"TCPInfo.Probes", Field, 1, ""}, + {"TCPInfo.Rcv_mss", Field, 1, ""}, + {"TCPInfo.Rcv_rtt", Field, 1, ""}, + {"TCPInfo.Rcv_space", Field, 1, ""}, + {"TCPInfo.Rcv_ssthresh", Field, 1, ""}, + {"TCPInfo.Reordering", Field, 1, ""}, + {"TCPInfo.Retrans", Field, 1, ""}, + {"TCPInfo.Retransmits", Field, 1, ""}, + {"TCPInfo.Rto", Field, 1, ""}, + {"TCPInfo.Rtt", Field, 1, ""}, + {"TCPInfo.Rttvar", Field, 1, ""}, + {"TCPInfo.Sacked", Field, 1, ""}, + {"TCPInfo.Snd_cwnd", Field, 1, ""}, + {"TCPInfo.Snd_mss", Field, 1, ""}, + {"TCPInfo.Snd_ssthresh", Field, 1, ""}, + {"TCPInfo.State", Field, 1, ""}, + {"TCPInfo.Total_retrans", Field, 1, ""}, + {"TCPInfo.Unacked", Field, 1, ""}, + {"TCPKeepalive", Type, 3, ""}, + {"TCPKeepalive.Interval", Field, 3, ""}, + {"TCPKeepalive.OnOff", Field, 3, ""}, + {"TCPKeepalive.Time", Field, 3, ""}, + {"TCP_CA_NAME_MAX", Const, 0, ""}, + {"TCP_CONGCTL", Const, 1, ""}, + {"TCP_CONGESTION", Const, 0, ""}, + {"TCP_CONNECTIONTIMEOUT", Const, 0, ""}, + {"TCP_CORK", Const, 0, ""}, + {"TCP_DEFER_ACCEPT", Const, 0, ""}, + {"TCP_ENABLE_ECN", Const, 16, ""}, + {"TCP_INFO", Const, 0, ""}, + {"TCP_KEEPALIVE", Const, 0, ""}, + {"TCP_KEEPCNT", Const, 0, ""}, + {"TCP_KEEPIDLE", Const, 0, ""}, + {"TCP_KEEPINIT", Const, 1, ""}, + {"TCP_KEEPINTVL", Const, 0, ""}, + {"TCP_LINGER2", Const, 0, ""}, + {"TCP_MAXBURST", Const, 0, ""}, + {"TCP_MAXHLEN", Const, 0, ""}, + {"TCP_MAXOLEN", Const, 0, ""}, + {"TCP_MAXSEG", Const, 0, ""}, + {"TCP_MAXWIN", Const, 0, ""}, + {"TCP_MAX_SACK", Const, 0, ""}, + {"TCP_MAX_WINSHIFT", Const, 0, ""}, + {"TCP_MD5SIG", Const, 0, ""}, + {"TCP_MD5SIG_MAXKEYLEN", Const, 0, ""}, + {"TCP_MINMSS", Const, 0, ""}, + {"TCP_MINMSSOVERLOAD", Const, 0, ""}, + {"TCP_MSS", Const, 0, ""}, + {"TCP_NODELAY", Const, 0, ""}, + {"TCP_NOOPT", Const, 0, ""}, + {"TCP_NOPUSH", Const, 0, ""}, + {"TCP_NOTSENT_LOWAT", Const, 16, ""}, + {"TCP_NSTATES", Const, 1, ""}, + {"TCP_QUICKACK", Const, 0, ""}, + {"TCP_RXT_CONNDROPTIME", Const, 0, ""}, + {"TCP_RXT_FINDROP", Const, 0, ""}, + {"TCP_SACK_ENABLE", Const, 1, ""}, + {"TCP_SENDMOREACKS", Const, 16, ""}, + {"TCP_SYNCNT", Const, 0, ""}, + {"TCP_VENDOR", Const, 3, ""}, + {"TCP_WINDOW_CLAMP", Const, 0, ""}, + {"TCSAFLUSH", Const, 
1, ""}, + {"TCSETS", Const, 0, ""}, + {"TF_DISCONNECT", Const, 0, ""}, + {"TF_REUSE_SOCKET", Const, 0, ""}, + {"TF_USE_DEFAULT_WORKER", Const, 0, ""}, + {"TF_USE_KERNEL_APC", Const, 0, ""}, + {"TF_USE_SYSTEM_THREAD", Const, 0, ""}, + {"TF_WRITE_BEHIND", Const, 0, ""}, + {"TH32CS_INHERIT", Const, 4, ""}, + {"TH32CS_SNAPALL", Const, 4, ""}, + {"TH32CS_SNAPHEAPLIST", Const, 4, ""}, + {"TH32CS_SNAPMODULE", Const, 4, ""}, + {"TH32CS_SNAPMODULE32", Const, 4, ""}, + {"TH32CS_SNAPPROCESS", Const, 4, ""}, + {"TH32CS_SNAPTHREAD", Const, 4, ""}, + {"TIME_ZONE_ID_DAYLIGHT", Const, 0, ""}, + {"TIME_ZONE_ID_STANDARD", Const, 0, ""}, + {"TIME_ZONE_ID_UNKNOWN", Const, 0, ""}, + {"TIOCCBRK", Const, 0, ""}, + {"TIOCCDTR", Const, 0, ""}, + {"TIOCCONS", Const, 0, ""}, + {"TIOCDCDTIMESTAMP", Const, 0, ""}, + {"TIOCDRAIN", Const, 0, ""}, + {"TIOCDSIMICROCODE", Const, 0, ""}, + {"TIOCEXCL", Const, 0, ""}, + {"TIOCEXT", Const, 0, ""}, + {"TIOCFLAG_CDTRCTS", Const, 1, ""}, + {"TIOCFLAG_CLOCAL", Const, 1, ""}, + {"TIOCFLAG_CRTSCTS", Const, 1, ""}, + {"TIOCFLAG_MDMBUF", Const, 1, ""}, + {"TIOCFLAG_PPS", Const, 1, ""}, + {"TIOCFLAG_SOFTCAR", Const, 1, ""}, + {"TIOCFLUSH", Const, 0, ""}, + {"TIOCGDEV", Const, 0, ""}, + {"TIOCGDRAINWAIT", Const, 0, ""}, + {"TIOCGETA", Const, 0, ""}, + {"TIOCGETD", Const, 0, ""}, + {"TIOCGFLAGS", Const, 1, ""}, + {"TIOCGICOUNT", Const, 0, ""}, + {"TIOCGLCKTRMIOS", Const, 0, ""}, + {"TIOCGLINED", Const, 1, ""}, + {"TIOCGPGRP", Const, 0, ""}, + {"TIOCGPTN", Const, 0, ""}, + {"TIOCGQSIZE", Const, 1, ""}, + {"TIOCGRANTPT", Const, 1, ""}, + {"TIOCGRS485", Const, 0, ""}, + {"TIOCGSERIAL", Const, 0, ""}, + {"TIOCGSID", Const, 0, ""}, + {"TIOCGSIZE", Const, 1, ""}, + {"TIOCGSOFTCAR", Const, 0, ""}, + {"TIOCGTSTAMP", Const, 1, ""}, + {"TIOCGWINSZ", Const, 0, ""}, + {"TIOCINQ", Const, 0, ""}, + {"TIOCIXOFF", Const, 0, ""}, + {"TIOCIXON", Const, 0, ""}, + {"TIOCLINUX", Const, 0, ""}, + {"TIOCMBIC", Const, 0, ""}, + {"TIOCMBIS", Const, 0, ""}, + {"TIOCMGDTRWAIT", Const, 0, ""}, + {"TIOCMGET", Const, 0, ""}, + {"TIOCMIWAIT", Const, 0, ""}, + {"TIOCMODG", Const, 0, ""}, + {"TIOCMODS", Const, 0, ""}, + {"TIOCMSDTRWAIT", Const, 0, ""}, + {"TIOCMSET", Const, 0, ""}, + {"TIOCM_CAR", Const, 0, ""}, + {"TIOCM_CD", Const, 0, ""}, + {"TIOCM_CTS", Const, 0, ""}, + {"TIOCM_DCD", Const, 0, ""}, + {"TIOCM_DSR", Const, 0, ""}, + {"TIOCM_DTR", Const, 0, ""}, + {"TIOCM_LE", Const, 0, ""}, + {"TIOCM_RI", Const, 0, ""}, + {"TIOCM_RNG", Const, 0, ""}, + {"TIOCM_RTS", Const, 0, ""}, + {"TIOCM_SR", Const, 0, ""}, + {"TIOCM_ST", Const, 0, ""}, + {"TIOCNOTTY", Const, 0, ""}, + {"TIOCNXCL", Const, 0, ""}, + {"TIOCOUTQ", Const, 0, ""}, + {"TIOCPKT", Const, 0, ""}, + {"TIOCPKT_DATA", Const, 0, ""}, + {"TIOCPKT_DOSTOP", Const, 0, ""}, + {"TIOCPKT_FLUSHREAD", Const, 0, ""}, + {"TIOCPKT_FLUSHWRITE", Const, 0, ""}, + {"TIOCPKT_IOCTL", Const, 0, ""}, + {"TIOCPKT_NOSTOP", Const, 0, ""}, + {"TIOCPKT_START", Const, 0, ""}, + {"TIOCPKT_STOP", Const, 0, ""}, + {"TIOCPTMASTER", Const, 0, ""}, + {"TIOCPTMGET", Const, 1, ""}, + {"TIOCPTSNAME", Const, 1, ""}, + {"TIOCPTYGNAME", Const, 0, ""}, + {"TIOCPTYGRANT", Const, 0, ""}, + {"TIOCPTYUNLK", Const, 0, ""}, + {"TIOCRCVFRAME", Const, 1, ""}, + {"TIOCREMOTE", Const, 0, ""}, + {"TIOCSBRK", Const, 0, ""}, + {"TIOCSCONS", Const, 0, ""}, + {"TIOCSCTTY", Const, 0, ""}, + {"TIOCSDRAINWAIT", Const, 0, ""}, + {"TIOCSDTR", Const, 0, ""}, + {"TIOCSERCONFIG", Const, 0, ""}, + {"TIOCSERGETLSR", Const, 0, ""}, + {"TIOCSERGETMULTI", Const, 0, ""}, + {"TIOCSERGSTRUCT", Const, 0, ""}, + {"TIOCSERGWILD", 
Const, 0, ""}, + {"TIOCSERSETMULTI", Const, 0, ""}, + {"TIOCSERSWILD", Const, 0, ""}, + {"TIOCSER_TEMT", Const, 0, ""}, + {"TIOCSETA", Const, 0, ""}, + {"TIOCSETAF", Const, 0, ""}, + {"TIOCSETAW", Const, 0, ""}, + {"TIOCSETD", Const, 0, ""}, + {"TIOCSFLAGS", Const, 1, ""}, + {"TIOCSIG", Const, 0, ""}, + {"TIOCSLCKTRMIOS", Const, 0, ""}, + {"TIOCSLINED", Const, 1, ""}, + {"TIOCSPGRP", Const, 0, ""}, + {"TIOCSPTLCK", Const, 0, ""}, + {"TIOCSQSIZE", Const, 1, ""}, + {"TIOCSRS485", Const, 0, ""}, + {"TIOCSSERIAL", Const, 0, ""}, + {"TIOCSSIZE", Const, 1, ""}, + {"TIOCSSOFTCAR", Const, 0, ""}, + {"TIOCSTART", Const, 0, ""}, + {"TIOCSTAT", Const, 0, ""}, + {"TIOCSTI", Const, 0, ""}, + {"TIOCSTOP", Const, 0, ""}, + {"TIOCSTSTAMP", Const, 1, ""}, + {"TIOCSWINSZ", Const, 0, ""}, + {"TIOCTIMESTAMP", Const, 0, ""}, + {"TIOCUCNTL", Const, 0, ""}, + {"TIOCVHANGUP", Const, 0, ""}, + {"TIOCXMTFRAME", Const, 1, ""}, + {"TOKEN_ADJUST_DEFAULT", Const, 0, ""}, + {"TOKEN_ADJUST_GROUPS", Const, 0, ""}, + {"TOKEN_ADJUST_PRIVILEGES", Const, 0, ""}, + {"TOKEN_ADJUST_SESSIONID", Const, 11, ""}, + {"TOKEN_ALL_ACCESS", Const, 0, ""}, + {"TOKEN_ASSIGN_PRIMARY", Const, 0, ""}, + {"TOKEN_DUPLICATE", Const, 0, ""}, + {"TOKEN_EXECUTE", Const, 0, ""}, + {"TOKEN_IMPERSONATE", Const, 0, ""}, + {"TOKEN_QUERY", Const, 0, ""}, + {"TOKEN_QUERY_SOURCE", Const, 0, ""}, + {"TOKEN_READ", Const, 0, ""}, + {"TOKEN_WRITE", Const, 0, ""}, + {"TOSTOP", Const, 0, ""}, + {"TRUNCATE_EXISTING", Const, 0, ""}, + {"TUNATTACHFILTER", Const, 0, ""}, + {"TUNDETACHFILTER", Const, 0, ""}, + {"TUNGETFEATURES", Const, 0, ""}, + {"TUNGETIFF", Const, 0, ""}, + {"TUNGETSNDBUF", Const, 0, ""}, + {"TUNGETVNETHDRSZ", Const, 0, ""}, + {"TUNSETDEBUG", Const, 0, ""}, + {"TUNSETGROUP", Const, 0, ""}, + {"TUNSETIFF", Const, 0, ""}, + {"TUNSETLINK", Const, 0, ""}, + {"TUNSETNOCSUM", Const, 0, ""}, + {"TUNSETOFFLOAD", Const, 0, ""}, + {"TUNSETOWNER", Const, 0, ""}, + {"TUNSETPERSIST", Const, 0, ""}, + {"TUNSETSNDBUF", Const, 0, ""}, + {"TUNSETTXFILTER", Const, 0, ""}, + {"TUNSETVNETHDRSZ", Const, 0, ""}, + {"Tee", Func, 0, "func(rfd int, wfd int, len int, flags int) (n int64, err error)"}, + {"TerminateProcess", Func, 0, ""}, + {"Termios", Type, 0, ""}, + {"Termios.Cc", Field, 0, ""}, + {"Termios.Cflag", Field, 0, ""}, + {"Termios.Iflag", Field, 0, ""}, + {"Termios.Ispeed", Field, 0, ""}, + {"Termios.Lflag", Field, 0, ""}, + {"Termios.Line", Field, 0, ""}, + {"Termios.Oflag", Field, 0, ""}, + {"Termios.Ospeed", Field, 0, ""}, + {"Termios.Pad_cgo_0", Field, 0, ""}, + {"Tgkill", Func, 0, "func(tgid int, tid int, sig Signal) (err error)"}, + {"Time", Func, 0, "func(t *Time_t) (tt Time_t, err error)"}, + {"Time_t", Type, 0, ""}, + {"Times", Func, 0, "func(tms *Tms) (ticks uintptr, err error)"}, + {"Timespec", Type, 0, ""}, + {"Timespec.Nsec", Field, 0, ""}, + {"Timespec.Pad_cgo_0", Field, 2, ""}, + {"Timespec.Sec", Field, 0, ""}, + {"TimespecToNsec", Func, 0, "func(ts Timespec) int64"}, + {"Timeval", Type, 0, ""}, + {"Timeval.Pad_cgo_0", Field, 0, ""}, + {"Timeval.Sec", Field, 0, ""}, + {"Timeval.Usec", Field, 0, ""}, + {"Timeval32", Type, 0, ""}, + {"Timeval32.Sec", Field, 0, ""}, + {"Timeval32.Usec", Field, 0, ""}, + {"TimevalToNsec", Func, 0, "func(tv Timeval) int64"}, + {"Timex", Type, 0, ""}, + {"Timex.Calcnt", Field, 0, ""}, + {"Timex.Constant", Field, 0, ""}, + {"Timex.Errcnt", Field, 0, ""}, + {"Timex.Esterror", Field, 0, ""}, + {"Timex.Freq", Field, 0, ""}, + {"Timex.Jitcnt", Field, 0, ""}, + {"Timex.Jitter", Field, 0, ""}, + {"Timex.Maxerror", Field, 0, 
""}, + {"Timex.Modes", Field, 0, ""}, + {"Timex.Offset", Field, 0, ""}, + {"Timex.Pad_cgo_0", Field, 0, ""}, + {"Timex.Pad_cgo_1", Field, 0, ""}, + {"Timex.Pad_cgo_2", Field, 0, ""}, + {"Timex.Pad_cgo_3", Field, 0, ""}, + {"Timex.Ppsfreq", Field, 0, ""}, + {"Timex.Precision", Field, 0, ""}, + {"Timex.Shift", Field, 0, ""}, + {"Timex.Stabil", Field, 0, ""}, + {"Timex.Status", Field, 0, ""}, + {"Timex.Stbcnt", Field, 0, ""}, + {"Timex.Tai", Field, 0, ""}, + {"Timex.Tick", Field, 0, ""}, + {"Timex.Time", Field, 0, ""}, + {"Timex.Tolerance", Field, 0, ""}, + {"Timezoneinformation", Type, 0, ""}, + {"Timezoneinformation.Bias", Field, 0, ""}, + {"Timezoneinformation.DaylightBias", Field, 0, ""}, + {"Timezoneinformation.DaylightDate", Field, 0, ""}, + {"Timezoneinformation.DaylightName", Field, 0, ""}, + {"Timezoneinformation.StandardBias", Field, 0, ""}, + {"Timezoneinformation.StandardDate", Field, 0, ""}, + {"Timezoneinformation.StandardName", Field, 0, ""}, + {"Tms", Type, 0, ""}, + {"Tms.Cstime", Field, 0, ""}, + {"Tms.Cutime", Field, 0, ""}, + {"Tms.Stime", Field, 0, ""}, + {"Tms.Utime", Field, 0, ""}, + {"Token", Type, 0, ""}, + {"TokenAccessInformation", Const, 0, ""}, + {"TokenAuditPolicy", Const, 0, ""}, + {"TokenDefaultDacl", Const, 0, ""}, + {"TokenElevation", Const, 0, ""}, + {"TokenElevationType", Const, 0, ""}, + {"TokenGroups", Const, 0, ""}, + {"TokenGroupsAndPrivileges", Const, 0, ""}, + {"TokenHasRestrictions", Const, 0, ""}, + {"TokenImpersonationLevel", Const, 0, ""}, + {"TokenIntegrityLevel", Const, 0, ""}, + {"TokenLinkedToken", Const, 0, ""}, + {"TokenLogonSid", Const, 0, ""}, + {"TokenMandatoryPolicy", Const, 0, ""}, + {"TokenOrigin", Const, 0, ""}, + {"TokenOwner", Const, 0, ""}, + {"TokenPrimaryGroup", Const, 0, ""}, + {"TokenPrivileges", Const, 0, ""}, + {"TokenRestrictedSids", Const, 0, ""}, + {"TokenSandBoxInert", Const, 0, ""}, + {"TokenSessionId", Const, 0, ""}, + {"TokenSessionReference", Const, 0, ""}, + {"TokenSource", Const, 0, ""}, + {"TokenStatistics", Const, 0, ""}, + {"TokenType", Const, 0, ""}, + {"TokenUIAccess", Const, 0, ""}, + {"TokenUser", Const, 0, ""}, + {"TokenVirtualizationAllowed", Const, 0, ""}, + {"TokenVirtualizationEnabled", Const, 0, ""}, + {"Tokenprimarygroup", Type, 0, ""}, + {"Tokenprimarygroup.PrimaryGroup", Field, 0, ""}, + {"Tokenuser", Type, 0, ""}, + {"Tokenuser.User", Field, 0, ""}, + {"TranslateAccountName", Func, 0, ""}, + {"TranslateName", Func, 0, ""}, + {"TransmitFile", Func, 0, ""}, + {"TransmitFileBuffers", Type, 0, ""}, + {"TransmitFileBuffers.Head", Field, 0, ""}, + {"TransmitFileBuffers.HeadLength", Field, 0, ""}, + {"TransmitFileBuffers.Tail", Field, 0, ""}, + {"TransmitFileBuffers.TailLength", Field, 0, ""}, + {"Truncate", Func, 0, "func(path string, length int64) (err error)"}, + {"UNIX_PATH_MAX", Const, 12, ""}, + {"USAGE_MATCH_TYPE_AND", Const, 0, ""}, + {"USAGE_MATCH_TYPE_OR", Const, 0, ""}, + {"UTF16FromString", Func, 1, ""}, + {"UTF16PtrFromString", Func, 1, ""}, + {"UTF16ToString", Func, 0, ""}, + {"Ucred", Type, 0, ""}, + {"Ucred.Gid", Field, 0, ""}, + {"Ucred.Pid", Field, 0, ""}, + {"Ucred.Uid", Field, 0, ""}, + {"Umask", Func, 0, "func(mask int) (oldmask int)"}, + {"Uname", Func, 0, "func(buf *Utsname) (err error)"}, + {"Undelete", Func, 0, ""}, + {"UnixCredentials", Func, 0, "func(ucred *Ucred) []byte"}, + {"UnixRights", Func, 0, "func(fds ...int) []byte"}, + {"Unlink", Func, 0, "func(path string) error"}, + {"Unlinkat", Func, 0, "func(dirfd int, path string) error"}, + {"UnmapViewOfFile", Func, 0, ""}, + 
{"Unmount", Func, 0, "func(target string, flags int) (err error)"}, + {"Unsetenv", Func, 4, "func(key string) error"}, + {"Unshare", Func, 0, "func(flags int) (err error)"}, + {"UserInfo10", Type, 0, ""}, + {"UserInfo10.Comment", Field, 0, ""}, + {"UserInfo10.FullName", Field, 0, ""}, + {"UserInfo10.Name", Field, 0, ""}, + {"UserInfo10.UsrComment", Field, 0, ""}, + {"Ustat", Func, 0, "func(dev int, ubuf *Ustat_t) (err error)"}, + {"Ustat_t", Type, 0, ""}, + {"Ustat_t.Fname", Field, 0, ""}, + {"Ustat_t.Fpack", Field, 0, ""}, + {"Ustat_t.Pad_cgo_0", Field, 0, ""}, + {"Ustat_t.Pad_cgo_1", Field, 0, ""}, + {"Ustat_t.Tfree", Field, 0, ""}, + {"Ustat_t.Tinode", Field, 0, ""}, + {"Utimbuf", Type, 0, ""}, + {"Utimbuf.Actime", Field, 0, ""}, + {"Utimbuf.Modtime", Field, 0, ""}, + {"Utime", Func, 0, "func(path string, buf *Utimbuf) (err error)"}, + {"Utimes", Func, 0, "func(path string, tv []Timeval) (err error)"}, + {"UtimesNano", Func, 1, "func(path string, ts []Timespec) (err error)"}, + {"Utsname", Type, 0, ""}, + {"Utsname.Domainname", Field, 0, ""}, + {"Utsname.Machine", Field, 0, ""}, + {"Utsname.Nodename", Field, 0, ""}, + {"Utsname.Release", Field, 0, ""}, + {"Utsname.Sysname", Field, 0, ""}, + {"Utsname.Version", Field, 0, ""}, + {"VDISCARD", Const, 0, ""}, + {"VDSUSP", Const, 1, ""}, + {"VEOF", Const, 0, ""}, + {"VEOL", Const, 0, ""}, + {"VEOL2", Const, 0, ""}, + {"VERASE", Const, 0, ""}, + {"VERASE2", Const, 1, ""}, + {"VINTR", Const, 0, ""}, + {"VKILL", Const, 0, ""}, + {"VLNEXT", Const, 0, ""}, + {"VMIN", Const, 0, ""}, + {"VQUIT", Const, 0, ""}, + {"VREPRINT", Const, 0, ""}, + {"VSTART", Const, 0, ""}, + {"VSTATUS", Const, 1, ""}, + {"VSTOP", Const, 0, ""}, + {"VSUSP", Const, 0, ""}, + {"VSWTC", Const, 0, ""}, + {"VT0", Const, 1, ""}, + {"VT1", Const, 1, ""}, + {"VTDLY", Const, 1, ""}, + {"VTIME", Const, 0, ""}, + {"VWERASE", Const, 0, ""}, + {"VirtualLock", Func, 0, ""}, + {"VirtualUnlock", Func, 0, ""}, + {"WAIT_ABANDONED", Const, 0, ""}, + {"WAIT_FAILED", Const, 0, ""}, + {"WAIT_OBJECT_0", Const, 0, ""}, + {"WAIT_TIMEOUT", Const, 0, ""}, + {"WALL", Const, 0, ""}, + {"WALLSIG", Const, 1, ""}, + {"WALTSIG", Const, 1, ""}, + {"WCLONE", Const, 0, ""}, + {"WCONTINUED", Const, 0, ""}, + {"WCOREFLAG", Const, 0, ""}, + {"WEXITED", Const, 0, ""}, + {"WLINUXCLONE", Const, 0, ""}, + {"WNOHANG", Const, 0, ""}, + {"WNOTHREAD", Const, 0, ""}, + {"WNOWAIT", Const, 0, ""}, + {"WNOZOMBIE", Const, 1, ""}, + {"WOPTSCHECKED", Const, 1, ""}, + {"WORDSIZE", Const, 0, ""}, + {"WSABuf", Type, 0, ""}, + {"WSABuf.Buf", Field, 0, ""}, + {"WSABuf.Len", Field, 0, ""}, + {"WSACleanup", Func, 0, ""}, + {"WSADESCRIPTION_LEN", Const, 0, ""}, + {"WSAData", Type, 0, ""}, + {"WSAData.Description", Field, 0, ""}, + {"WSAData.HighVersion", Field, 0, ""}, + {"WSAData.MaxSockets", Field, 0, ""}, + {"WSAData.MaxUdpDg", Field, 0, ""}, + {"WSAData.SystemStatus", Field, 0, ""}, + {"WSAData.VendorInfo", Field, 0, ""}, + {"WSAData.Version", Field, 0, ""}, + {"WSAEACCES", Const, 2, ""}, + {"WSAECONNABORTED", Const, 9, ""}, + {"WSAECONNRESET", Const, 3, ""}, + {"WSAENOPROTOOPT", Const, 23, ""}, + {"WSAEnumProtocols", Func, 2, ""}, + {"WSAID_CONNECTEX", Var, 1, ""}, + {"WSAIoctl", Func, 0, ""}, + {"WSAPROTOCOL_LEN", Const, 2, ""}, + {"WSAProtocolChain", Type, 2, ""}, + {"WSAProtocolChain.ChainEntries", Field, 2, ""}, + {"WSAProtocolChain.ChainLen", Field, 2, ""}, + {"WSAProtocolInfo", Type, 2, ""}, + {"WSAProtocolInfo.AddressFamily", Field, 2, ""}, + {"WSAProtocolInfo.CatalogEntryId", Field, 2, ""}, + 
{"WSAProtocolInfo.MaxSockAddr", Field, 2, ""}, + {"WSAProtocolInfo.MessageSize", Field, 2, ""}, + {"WSAProtocolInfo.MinSockAddr", Field, 2, ""}, + {"WSAProtocolInfo.NetworkByteOrder", Field, 2, ""}, + {"WSAProtocolInfo.Protocol", Field, 2, ""}, + {"WSAProtocolInfo.ProtocolChain", Field, 2, ""}, + {"WSAProtocolInfo.ProtocolMaxOffset", Field, 2, ""}, + {"WSAProtocolInfo.ProtocolName", Field, 2, ""}, + {"WSAProtocolInfo.ProviderFlags", Field, 2, ""}, + {"WSAProtocolInfo.ProviderId", Field, 2, ""}, + {"WSAProtocolInfo.ProviderReserved", Field, 2, ""}, + {"WSAProtocolInfo.SecurityScheme", Field, 2, ""}, + {"WSAProtocolInfo.ServiceFlags1", Field, 2, ""}, + {"WSAProtocolInfo.ServiceFlags2", Field, 2, ""}, + {"WSAProtocolInfo.ServiceFlags3", Field, 2, ""}, + {"WSAProtocolInfo.ServiceFlags4", Field, 2, ""}, + {"WSAProtocolInfo.SocketType", Field, 2, ""}, + {"WSAProtocolInfo.Version", Field, 2, ""}, + {"WSARecv", Func, 0, ""}, + {"WSARecvFrom", Func, 0, ""}, + {"WSASYS_STATUS_LEN", Const, 0, ""}, + {"WSASend", Func, 0, ""}, + {"WSASendTo", Func, 0, ""}, + {"WSASendto", Func, 0, ""}, + {"WSAStartup", Func, 0, ""}, + {"WSTOPPED", Const, 0, ""}, + {"WTRAPPED", Const, 1, ""}, + {"WUNTRACED", Const, 0, ""}, + {"Wait4", Func, 0, "func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)"}, + {"WaitForSingleObject", Func, 0, ""}, + {"WaitStatus", Type, 0, ""}, + {"WaitStatus.ExitCode", Field, 0, ""}, + {"Win32FileAttributeData", Type, 0, ""}, + {"Win32FileAttributeData.CreationTime", Field, 0, ""}, + {"Win32FileAttributeData.FileAttributes", Field, 0, ""}, + {"Win32FileAttributeData.FileSizeHigh", Field, 0, ""}, + {"Win32FileAttributeData.FileSizeLow", Field, 0, ""}, + {"Win32FileAttributeData.LastAccessTime", Field, 0, ""}, + {"Win32FileAttributeData.LastWriteTime", Field, 0, ""}, + {"Win32finddata", Type, 0, ""}, + {"Win32finddata.AlternateFileName", Field, 0, ""}, + {"Win32finddata.CreationTime", Field, 0, ""}, + {"Win32finddata.FileAttributes", Field, 0, ""}, + {"Win32finddata.FileName", Field, 0, ""}, + {"Win32finddata.FileSizeHigh", Field, 0, ""}, + {"Win32finddata.FileSizeLow", Field, 0, ""}, + {"Win32finddata.LastAccessTime", Field, 0, ""}, + {"Win32finddata.LastWriteTime", Field, 0, ""}, + {"Win32finddata.Reserved0", Field, 0, ""}, + {"Win32finddata.Reserved1", Field, 0, ""}, + {"Write", Func, 0, "func(fd int, p []byte) (n int, err error)"}, + {"WriteConsole", Func, 1, ""}, + {"WriteFile", Func, 0, ""}, + {"X509_ASN_ENCODING", Const, 0, ""}, + {"XCASE", Const, 0, ""}, + {"XP1_CONNECTIONLESS", Const, 2, ""}, + {"XP1_CONNECT_DATA", Const, 2, ""}, + {"XP1_DISCONNECT_DATA", Const, 2, ""}, + {"XP1_EXPEDITED_DATA", Const, 2, ""}, + {"XP1_GRACEFUL_CLOSE", Const, 2, ""}, + {"XP1_GUARANTEED_DELIVERY", Const, 2, ""}, + {"XP1_GUARANTEED_ORDER", Const, 2, ""}, + {"XP1_IFS_HANDLES", Const, 2, ""}, + {"XP1_MESSAGE_ORIENTED", Const, 2, ""}, + {"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2, ""}, + {"XP1_MULTIPOINT_DATA_PLANE", Const, 2, ""}, + {"XP1_PARTIAL_MESSAGE", Const, 2, ""}, + {"XP1_PSEUDO_STREAM", Const, 2, ""}, + {"XP1_QOS_SUPPORTED", Const, 2, ""}, + {"XP1_SAN_SUPPORT_SDP", Const, 2, ""}, + {"XP1_SUPPORT_BROADCAST", Const, 2, ""}, + {"XP1_SUPPORT_MULTIPOINT", Const, 2, ""}, + {"XP1_UNI_RECV", Const, 2, ""}, + {"XP1_UNI_SEND", Const, 2, ""}, + }, + "syscall/js": { + {"CopyBytesToGo", Func, 0, ""}, + {"CopyBytesToJS", Func, 0, ""}, + {"Error", Type, 0, ""}, + {"Func", Type, 0, ""}, + {"FuncOf", Func, 0, ""}, + {"Global", Func, 0, ""}, + {"Null", Func, 0, ""}, + {"Type", Type, 0, 
""}, + {"TypeBoolean", Const, 0, ""}, + {"TypeFunction", Const, 0, ""}, + {"TypeNull", Const, 0, ""}, + {"TypeNumber", Const, 0, ""}, + {"TypeObject", Const, 0, ""}, + {"TypeString", Const, 0, ""}, + {"TypeSymbol", Const, 0, ""}, + {"TypeUndefined", Const, 0, ""}, + {"Undefined", Func, 0, ""}, + {"Value", Type, 0, ""}, + {"ValueError", Type, 0, ""}, + {"ValueOf", Func, 0, ""}, + }, + "testing": { + {"(*B).Chdir", Method, 24, ""}, + {"(*B).Cleanup", Method, 14, ""}, + {"(*B).Context", Method, 24, ""}, + {"(*B).Elapsed", Method, 20, ""}, + {"(*B).Error", Method, 0, ""}, + {"(*B).Errorf", Method, 0, ""}, + {"(*B).Fail", Method, 0, ""}, + {"(*B).FailNow", Method, 0, ""}, + {"(*B).Failed", Method, 0, ""}, + {"(*B).Fatal", Method, 0, ""}, + {"(*B).Fatalf", Method, 0, ""}, + {"(*B).Helper", Method, 9, ""}, + {"(*B).Log", Method, 0, ""}, + {"(*B).Logf", Method, 0, ""}, + {"(*B).Loop", Method, 24, ""}, + {"(*B).Name", Method, 8, ""}, + {"(*B).ReportAllocs", Method, 1, ""}, + {"(*B).ReportMetric", Method, 13, ""}, + {"(*B).ResetTimer", Method, 0, ""}, + {"(*B).Run", Method, 7, ""}, + {"(*B).RunParallel", Method, 3, ""}, + {"(*B).SetBytes", Method, 0, ""}, + {"(*B).SetParallelism", Method, 3, ""}, + {"(*B).Setenv", Method, 17, ""}, + {"(*B).Skip", Method, 1, ""}, + {"(*B).SkipNow", Method, 1, ""}, + {"(*B).Skipf", Method, 1, ""}, + {"(*B).Skipped", Method, 1, ""}, + {"(*B).StartTimer", Method, 0, ""}, + {"(*B).StopTimer", Method, 0, ""}, + {"(*B).TempDir", Method, 15, ""}, + {"(*F).Add", Method, 18, ""}, + {"(*F).Chdir", Method, 24, ""}, + {"(*F).Cleanup", Method, 18, ""}, + {"(*F).Context", Method, 24, ""}, + {"(*F).Error", Method, 18, ""}, + {"(*F).Errorf", Method, 18, ""}, + {"(*F).Fail", Method, 18, ""}, + {"(*F).FailNow", Method, 18, ""}, + {"(*F).Failed", Method, 18, ""}, + {"(*F).Fatal", Method, 18, ""}, + {"(*F).Fatalf", Method, 18, ""}, + {"(*F).Fuzz", Method, 18, ""}, + {"(*F).Helper", Method, 18, ""}, + {"(*F).Log", Method, 18, ""}, + {"(*F).Logf", Method, 18, ""}, + {"(*F).Name", Method, 18, ""}, + {"(*F).Setenv", Method, 18, ""}, + {"(*F).Skip", Method, 18, ""}, + {"(*F).SkipNow", Method, 18, ""}, + {"(*F).Skipf", Method, 18, ""}, + {"(*F).Skipped", Method, 18, ""}, + {"(*F).TempDir", Method, 18, ""}, + {"(*M).Run", Method, 4, ""}, + {"(*PB).Next", Method, 3, ""}, + {"(*T).Chdir", Method, 24, ""}, + {"(*T).Cleanup", Method, 14, ""}, + {"(*T).Context", Method, 24, ""}, + {"(*T).Deadline", Method, 15, ""}, + {"(*T).Error", Method, 0, ""}, + {"(*T).Errorf", Method, 0, ""}, + {"(*T).Fail", Method, 0, ""}, + {"(*T).FailNow", Method, 0, ""}, + {"(*T).Failed", Method, 0, ""}, + {"(*T).Fatal", Method, 0, ""}, + {"(*T).Fatalf", Method, 0, ""}, + {"(*T).Helper", Method, 9, ""}, + {"(*T).Log", Method, 0, ""}, + {"(*T).Logf", Method, 0, ""}, + {"(*T).Name", Method, 8, ""}, + {"(*T).Parallel", Method, 0, ""}, + {"(*T).Run", Method, 7, ""}, + {"(*T).Setenv", Method, 17, ""}, + {"(*T).Skip", Method, 1, ""}, + {"(*T).SkipNow", Method, 1, ""}, + {"(*T).Skipf", Method, 1, ""}, + {"(*T).Skipped", Method, 1, ""}, + {"(*T).TempDir", Method, 15, ""}, + {"(BenchmarkResult).AllocedBytesPerOp", Method, 1, ""}, + {"(BenchmarkResult).AllocsPerOp", Method, 1, ""}, + {"(BenchmarkResult).MemString", Method, 1, ""}, + {"(BenchmarkResult).NsPerOp", Method, 0, ""}, + {"(BenchmarkResult).String", Method, 0, ""}, + {"AllocsPerRun", Func, 1, "func(runs int, f func()) (avg float64)"}, + {"B", Type, 0, ""}, + {"B.N", Field, 0, ""}, + {"Benchmark", Func, 0, "func(f func(b *B)) BenchmarkResult"}, + {"BenchmarkResult", Type, 0, 
""}, + {"BenchmarkResult.Bytes", Field, 0, ""}, + {"BenchmarkResult.Extra", Field, 13, ""}, + {"BenchmarkResult.MemAllocs", Field, 1, ""}, + {"BenchmarkResult.MemBytes", Field, 1, ""}, + {"BenchmarkResult.N", Field, 0, ""}, + {"BenchmarkResult.T", Field, 0, ""}, + {"Cover", Type, 2, ""}, + {"Cover.Blocks", Field, 2, ""}, + {"Cover.Counters", Field, 2, ""}, + {"Cover.CoveredPackages", Field, 2, ""}, + {"Cover.Mode", Field, 2, ""}, + {"CoverBlock", Type, 2, ""}, + {"CoverBlock.Col0", Field, 2, ""}, + {"CoverBlock.Col1", Field, 2, ""}, + {"CoverBlock.Line0", Field, 2, ""}, + {"CoverBlock.Line1", Field, 2, ""}, + {"CoverBlock.Stmts", Field, 2, ""}, + {"CoverMode", Func, 8, "func() string"}, + {"Coverage", Func, 4, "func() float64"}, + {"F", Type, 18, ""}, + {"Init", Func, 13, "func()"}, + {"InternalBenchmark", Type, 0, ""}, + {"InternalBenchmark.F", Field, 0, ""}, + {"InternalBenchmark.Name", Field, 0, ""}, + {"InternalExample", Type, 0, ""}, + {"InternalExample.F", Field, 0, ""}, + {"InternalExample.Name", Field, 0, ""}, + {"InternalExample.Output", Field, 0, ""}, + {"InternalExample.Unordered", Field, 7, ""}, + {"InternalFuzzTarget", Type, 18, ""}, + {"InternalFuzzTarget.Fn", Field, 18, ""}, + {"InternalFuzzTarget.Name", Field, 18, ""}, + {"InternalTest", Type, 0, ""}, + {"InternalTest.F", Field, 0, ""}, + {"InternalTest.Name", Field, 0, ""}, + {"M", Type, 4, ""}, + {"Main", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)"}, + {"MainStart", Func, 4, "func(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, fuzzTargets []InternalFuzzTarget, examples []InternalExample) *M"}, + {"PB", Type, 3, ""}, + {"RegisterCover", Func, 2, "func(c Cover)"}, + {"RunBenchmarks", Func, 0, "func(matchString func(pat string, str string) (bool, error), benchmarks []InternalBenchmark)"}, + {"RunExamples", Func, 0, "func(matchString func(pat string, str string) (bool, error), examples []InternalExample) (ok bool)"}, + {"RunTests", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest) (ok bool)"}, + {"Short", Func, 0, "func() bool"}, + {"T", Type, 0, ""}, + {"TB", Type, 2, ""}, + {"Testing", Func, 21, "func() bool"}, + {"Verbose", Func, 1, "func() bool"}, + }, + "testing/fstest": { + {"(MapFS).Glob", Method, 16, ""}, + {"(MapFS).Lstat", Method, 25, ""}, + {"(MapFS).Open", Method, 16, ""}, + {"(MapFS).ReadDir", Method, 16, ""}, + {"(MapFS).ReadFile", Method, 16, ""}, + {"(MapFS).ReadLink", Method, 25, ""}, + {"(MapFS).Stat", Method, 16, ""}, + {"(MapFS).Sub", Method, 16, ""}, + {"MapFS", Type, 16, ""}, + {"MapFile", Type, 16, ""}, + {"MapFile.Data", Field, 16, ""}, + {"MapFile.ModTime", Field, 16, ""}, + {"MapFile.Mode", Field, 16, ""}, + {"MapFile.Sys", Field, 16, ""}, + {"TestFS", Func, 16, "func(fsys fs.FS, expected ...string) error"}, + }, + "testing/iotest": { + {"DataErrReader", Func, 0, "func(r io.Reader) io.Reader"}, + {"ErrReader", Func, 16, "func(err error) io.Reader"}, + {"ErrTimeout", Var, 0, ""}, + {"HalfReader", Func, 0, "func(r io.Reader) io.Reader"}, + {"NewReadLogger", Func, 0, "func(prefix string, r io.Reader) io.Reader"}, + {"NewWriteLogger", Func, 0, "func(prefix string, w io.Writer) io.Writer"}, + {"OneByteReader", Func, 0, "func(r io.Reader) io.Reader"}, + {"TestReader", Func, 16, "func(r io.Reader, content []byte) error"}, + {"TimeoutReader", Func, 0, "func(r io.Reader) io.Reader"}, + {"TruncateWriter", Func, 0, "func(w io.Writer, n 
int64) io.Writer"}, + }, + "testing/quick": { + {"(*CheckEqualError).Error", Method, 0, ""}, + {"(*CheckError).Error", Method, 0, ""}, + {"(SetupError).Error", Method, 0, ""}, + {"Check", Func, 0, "func(f any, config *Config) error"}, + {"CheckEqual", Func, 0, "func(f any, g any, config *Config) error"}, + {"CheckEqualError", Type, 0, ""}, + {"CheckEqualError.CheckError", Field, 0, ""}, + {"CheckEqualError.Out1", Field, 0, ""}, + {"CheckEqualError.Out2", Field, 0, ""}, + {"CheckError", Type, 0, ""}, + {"CheckError.Count", Field, 0, ""}, + {"CheckError.In", Field, 0, ""}, + {"Config", Type, 0, ""}, + {"Config.MaxCount", Field, 0, ""}, + {"Config.MaxCountScale", Field, 0, ""}, + {"Config.Rand", Field, 0, ""}, + {"Config.Values", Field, 0, ""}, + {"Generator", Type, 0, ""}, + {"SetupError", Type, 0, ""}, + {"Value", Func, 0, "func(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool)"}, + }, + "testing/slogtest": { + {"Run", Func, 22, "func(t *testing.T, newHandler func(*testing.T) slog.Handler, result func(*testing.T) map[string]any)"}, + {"TestHandler", Func, 21, "func(h slog.Handler, results func() []map[string]any) error"}, + }, + "text/scanner": { + {"(*Position).IsValid", Method, 0, ""}, + {"(*Scanner).Init", Method, 0, ""}, + {"(*Scanner).IsValid", Method, 0, ""}, + {"(*Scanner).Next", Method, 0, ""}, + {"(*Scanner).Peek", Method, 0, ""}, + {"(*Scanner).Pos", Method, 0, ""}, + {"(*Scanner).Scan", Method, 0, ""}, + {"(*Scanner).TokenText", Method, 0, ""}, + {"(Position).String", Method, 0, ""}, + {"(Scanner).String", Method, 0, ""}, + {"Char", Const, 0, ""}, + {"Comment", Const, 0, ""}, + {"EOF", Const, 0, ""}, + {"Float", Const, 0, ""}, + {"GoTokens", Const, 0, ""}, + {"GoWhitespace", Const, 0, ""}, + {"Ident", Const, 0, ""}, + {"Int", Const, 0, ""}, + {"Position", Type, 0, ""}, + {"Position.Column", Field, 0, ""}, + {"Position.Filename", Field, 0, ""}, + {"Position.Line", Field, 0, ""}, + {"Position.Offset", Field, 0, ""}, + {"RawString", Const, 0, ""}, + {"ScanChars", Const, 0, ""}, + {"ScanComments", Const, 0, ""}, + {"ScanFloats", Const, 0, ""}, + {"ScanIdents", Const, 0, ""}, + {"ScanInts", Const, 0, ""}, + {"ScanRawStrings", Const, 0, ""}, + {"ScanStrings", Const, 0, ""}, + {"Scanner", Type, 0, ""}, + {"Scanner.Error", Field, 0, ""}, + {"Scanner.ErrorCount", Field, 0, ""}, + {"Scanner.IsIdentRune", Field, 4, ""}, + {"Scanner.Mode", Field, 0, ""}, + {"Scanner.Position", Field, 0, ""}, + {"Scanner.Whitespace", Field, 0, ""}, + {"SkipComments", Const, 0, ""}, + {"String", Const, 0, ""}, + {"TokenString", Func, 0, "func(tok rune) string"}, + }, + "text/tabwriter": { + {"(*Writer).Flush", Method, 0, ""}, + {"(*Writer).Init", Method, 0, ""}, + {"(*Writer).Write", Method, 0, ""}, + {"AlignRight", Const, 0, ""}, + {"Debug", Const, 0, ""}, + {"DiscardEmptyColumns", Const, 0, ""}, + {"Escape", Const, 0, ""}, + {"FilterHTML", Const, 0, ""}, + {"NewWriter", Func, 0, "func(output io.Writer, minwidth int, tabwidth int, padding int, padchar byte, flags uint) *Writer"}, + {"StripEscape", Const, 0, ""}, + {"TabIndent", Const, 0, ""}, + {"Writer", Type, 0, ""}, + }, + "text/template": { + {"(*Template).AddParseTree", Method, 0, ""}, + {"(*Template).Clone", Method, 0, ""}, + {"(*Template).DefinedTemplates", Method, 5, ""}, + {"(*Template).Delims", Method, 0, ""}, + {"(*Template).Execute", Method, 0, ""}, + {"(*Template).ExecuteTemplate", Method, 0, ""}, + {"(*Template).Funcs", Method, 0, ""}, + {"(*Template).Lookup", Method, 0, ""}, + {"(*Template).Name", Method, 0, ""}, + 
{"(*Template).New", Method, 0, ""}, + {"(*Template).Option", Method, 5, ""}, + {"(*Template).Parse", Method, 0, ""}, + {"(*Template).ParseFS", Method, 16, ""}, + {"(*Template).ParseFiles", Method, 0, ""}, + {"(*Template).ParseGlob", Method, 0, ""}, + {"(*Template).Templates", Method, 0, ""}, + {"(ExecError).Error", Method, 6, ""}, + {"(ExecError).Unwrap", Method, 13, ""}, + {"(Template).Copy", Method, 2, ""}, + {"(Template).ErrorContext", Method, 1, ""}, + {"ExecError", Type, 6, ""}, + {"ExecError.Err", Field, 6, ""}, + {"ExecError.Name", Field, 6, ""}, + {"FuncMap", Type, 0, ""}, + {"HTMLEscape", Func, 0, "func(w io.Writer, b []byte)"}, + {"HTMLEscapeString", Func, 0, "func(s string) string"}, + {"HTMLEscaper", Func, 0, "func(args ...any) string"}, + {"IsTrue", Func, 6, "func(val any) (truth bool, ok bool)"}, + {"JSEscape", Func, 0, "func(w io.Writer, b []byte)"}, + {"JSEscapeString", Func, 0, "func(s string) string"}, + {"JSEscaper", Func, 0, "func(args ...any) string"}, + {"Must", Func, 0, "func(t *Template, err error) *Template"}, + {"New", Func, 0, "func(name string) *Template"}, + {"ParseFS", Func, 16, "func(fsys fs.FS, patterns ...string) (*Template, error)"}, + {"ParseFiles", Func, 0, "func(filenames ...string) (*Template, error)"}, + {"ParseGlob", Func, 0, "func(pattern string) (*Template, error)"}, + {"Template", Type, 0, ""}, + {"Template.Tree", Field, 0, ""}, + {"URLQueryEscaper", Func, 0, "func(args ...any) string"}, + }, + "text/template/parse": { + {"(*ActionNode).Copy", Method, 0, ""}, + {"(*ActionNode).String", Method, 0, ""}, + {"(*BoolNode).Copy", Method, 0, ""}, + {"(*BoolNode).String", Method, 0, ""}, + {"(*BranchNode).Copy", Method, 4, ""}, + {"(*BranchNode).String", Method, 0, ""}, + {"(*BreakNode).Copy", Method, 18, ""}, + {"(*BreakNode).String", Method, 18, ""}, + {"(*ChainNode).Add", Method, 1, ""}, + {"(*ChainNode).Copy", Method, 1, ""}, + {"(*ChainNode).String", Method, 1, ""}, + {"(*CommandNode).Copy", Method, 0, ""}, + {"(*CommandNode).String", Method, 0, ""}, + {"(*CommentNode).Copy", Method, 16, ""}, + {"(*CommentNode).String", Method, 16, ""}, + {"(*ContinueNode).Copy", Method, 18, ""}, + {"(*ContinueNode).String", Method, 18, ""}, + {"(*DotNode).Copy", Method, 0, ""}, + {"(*DotNode).String", Method, 0, ""}, + {"(*DotNode).Type", Method, 0, ""}, + {"(*FieldNode).Copy", Method, 0, ""}, + {"(*FieldNode).String", Method, 0, ""}, + {"(*IdentifierNode).Copy", Method, 0, ""}, + {"(*IdentifierNode).SetPos", Method, 1, ""}, + {"(*IdentifierNode).SetTree", Method, 4, ""}, + {"(*IdentifierNode).String", Method, 0, ""}, + {"(*IfNode).Copy", Method, 0, ""}, + {"(*IfNode).String", Method, 0, ""}, + {"(*ListNode).Copy", Method, 0, ""}, + {"(*ListNode).CopyList", Method, 0, ""}, + {"(*ListNode).String", Method, 0, ""}, + {"(*NilNode).Copy", Method, 1, ""}, + {"(*NilNode).String", Method, 1, ""}, + {"(*NilNode).Type", Method, 1, ""}, + {"(*NumberNode).Copy", Method, 0, ""}, + {"(*NumberNode).String", Method, 0, ""}, + {"(*PipeNode).Copy", Method, 0, ""}, + {"(*PipeNode).CopyPipe", Method, 0, ""}, + {"(*PipeNode).String", Method, 0, ""}, + {"(*RangeNode).Copy", Method, 0, ""}, + {"(*RangeNode).String", Method, 0, ""}, + {"(*StringNode).Copy", Method, 0, ""}, + {"(*StringNode).String", Method, 0, ""}, + {"(*TemplateNode).Copy", Method, 0, ""}, + {"(*TemplateNode).String", Method, 0, ""}, + {"(*TextNode).Copy", Method, 0, ""}, + {"(*TextNode).String", Method, 0, ""}, + {"(*Tree).Copy", Method, 2, ""}, + {"(*Tree).ErrorContext", Method, 1, ""}, + {"(*Tree).Parse", Method, 0, 
""}, + {"(*VariableNode).Copy", Method, 0, ""}, + {"(*VariableNode).String", Method, 0, ""}, + {"(*WithNode).Copy", Method, 0, ""}, + {"(*WithNode).String", Method, 0, ""}, + {"(ActionNode).Position", Method, 1, ""}, + {"(ActionNode).Type", Method, 0, ""}, + {"(BoolNode).Position", Method, 1, ""}, + {"(BoolNode).Type", Method, 0, ""}, + {"(BranchNode).Position", Method, 1, ""}, + {"(BranchNode).Type", Method, 0, ""}, + {"(BreakNode).Position", Method, 18, ""}, + {"(BreakNode).Type", Method, 18, ""}, + {"(ChainNode).Position", Method, 1, ""}, + {"(ChainNode).Type", Method, 1, ""}, + {"(CommandNode).Position", Method, 1, ""}, + {"(CommandNode).Type", Method, 0, ""}, + {"(CommentNode).Position", Method, 16, ""}, + {"(CommentNode).Type", Method, 16, ""}, + {"(ContinueNode).Position", Method, 18, ""}, + {"(ContinueNode).Type", Method, 18, ""}, + {"(DotNode).Position", Method, 1, ""}, + {"(FieldNode).Position", Method, 1, ""}, + {"(FieldNode).Type", Method, 0, ""}, + {"(IdentifierNode).Position", Method, 1, ""}, + {"(IdentifierNode).Type", Method, 0, ""}, + {"(IfNode).Position", Method, 1, ""}, + {"(IfNode).Type", Method, 0, ""}, + {"(ListNode).Position", Method, 1, ""}, + {"(ListNode).Type", Method, 0, ""}, + {"(NilNode).Position", Method, 1, ""}, + {"(NodeType).Type", Method, 0, ""}, + {"(NumberNode).Position", Method, 1, ""}, + {"(NumberNode).Type", Method, 0, ""}, + {"(PipeNode).Position", Method, 1, ""}, + {"(PipeNode).Type", Method, 0, ""}, + {"(Pos).Position", Method, 1, ""}, + {"(RangeNode).Position", Method, 1, ""}, + {"(RangeNode).Type", Method, 0, ""}, + {"(StringNode).Position", Method, 1, ""}, + {"(StringNode).Type", Method, 0, ""}, + {"(TemplateNode).Position", Method, 1, ""}, + {"(TemplateNode).Type", Method, 0, ""}, + {"(TextNode).Position", Method, 1, ""}, + {"(TextNode).Type", Method, 0, ""}, + {"(VariableNode).Position", Method, 1, ""}, + {"(VariableNode).Type", Method, 0, ""}, + {"(WithNode).Position", Method, 1, ""}, + {"(WithNode).Type", Method, 0, ""}, + {"ActionNode", Type, 0, ""}, + {"ActionNode.Line", Field, 0, ""}, + {"ActionNode.NodeType", Field, 0, ""}, + {"ActionNode.Pipe", Field, 0, ""}, + {"ActionNode.Pos", Field, 1, ""}, + {"BoolNode", Type, 0, ""}, + {"BoolNode.NodeType", Field, 0, ""}, + {"BoolNode.Pos", Field, 1, ""}, + {"BoolNode.True", Field, 0, ""}, + {"BranchNode", Type, 0, ""}, + {"BranchNode.ElseList", Field, 0, ""}, + {"BranchNode.Line", Field, 0, ""}, + {"BranchNode.List", Field, 0, ""}, + {"BranchNode.NodeType", Field, 0, ""}, + {"BranchNode.Pipe", Field, 0, ""}, + {"BranchNode.Pos", Field, 1, ""}, + {"BreakNode", Type, 18, ""}, + {"BreakNode.Line", Field, 18, ""}, + {"BreakNode.NodeType", Field, 18, ""}, + {"BreakNode.Pos", Field, 18, ""}, + {"ChainNode", Type, 1, ""}, + {"ChainNode.Field", Field, 1, ""}, + {"ChainNode.Node", Field, 1, ""}, + {"ChainNode.NodeType", Field, 1, ""}, + {"ChainNode.Pos", Field, 1, ""}, + {"CommandNode", Type, 0, ""}, + {"CommandNode.Args", Field, 0, ""}, + {"CommandNode.NodeType", Field, 0, ""}, + {"CommandNode.Pos", Field, 1, ""}, + {"CommentNode", Type, 16, ""}, + {"CommentNode.NodeType", Field, 16, ""}, + {"CommentNode.Pos", Field, 16, ""}, + {"CommentNode.Text", Field, 16, ""}, + {"ContinueNode", Type, 18, ""}, + {"ContinueNode.Line", Field, 18, ""}, + {"ContinueNode.NodeType", Field, 18, ""}, + {"ContinueNode.Pos", Field, 18, ""}, + {"DotNode", Type, 0, ""}, + {"DotNode.NodeType", Field, 4, ""}, + {"DotNode.Pos", Field, 1, ""}, + {"FieldNode", Type, 0, ""}, + {"FieldNode.Ident", Field, 0, ""}, + 
{"FieldNode.NodeType", Field, 0, ""}, + {"FieldNode.Pos", Field, 1, ""}, + {"IdentifierNode", Type, 0, ""}, + {"IdentifierNode.Ident", Field, 0, ""}, + {"IdentifierNode.NodeType", Field, 0, ""}, + {"IdentifierNode.Pos", Field, 1, ""}, + {"IfNode", Type, 0, ""}, + {"IfNode.BranchNode", Field, 0, ""}, + {"IsEmptyTree", Func, 0, "func(n Node) bool"}, + {"ListNode", Type, 0, ""}, + {"ListNode.NodeType", Field, 0, ""}, + {"ListNode.Nodes", Field, 0, ""}, + {"ListNode.Pos", Field, 1, ""}, + {"Mode", Type, 16, ""}, + {"New", Func, 0, "func(name string, funcs ...map[string]any) *Tree"}, + {"NewIdentifier", Func, 0, "func(ident string) *IdentifierNode"}, + {"NilNode", Type, 1, ""}, + {"NilNode.NodeType", Field, 4, ""}, + {"NilNode.Pos", Field, 1, ""}, + {"Node", Type, 0, ""}, + {"NodeAction", Const, 0, ""}, + {"NodeBool", Const, 0, ""}, + {"NodeBreak", Const, 18, ""}, + {"NodeChain", Const, 1, ""}, + {"NodeCommand", Const, 0, ""}, + {"NodeComment", Const, 16, ""}, + {"NodeContinue", Const, 18, ""}, + {"NodeDot", Const, 0, ""}, + {"NodeField", Const, 0, ""}, + {"NodeIdentifier", Const, 0, ""}, + {"NodeIf", Const, 0, ""}, + {"NodeList", Const, 0, ""}, + {"NodeNil", Const, 1, ""}, + {"NodeNumber", Const, 0, ""}, + {"NodePipe", Const, 0, ""}, + {"NodeRange", Const, 0, ""}, + {"NodeString", Const, 0, ""}, + {"NodeTemplate", Const, 0, ""}, + {"NodeText", Const, 0, ""}, + {"NodeType", Type, 0, ""}, + {"NodeVariable", Const, 0, ""}, + {"NodeWith", Const, 0, ""}, + {"NumberNode", Type, 0, ""}, + {"NumberNode.Complex128", Field, 0, ""}, + {"NumberNode.Float64", Field, 0, ""}, + {"NumberNode.Int64", Field, 0, ""}, + {"NumberNode.IsComplex", Field, 0, ""}, + {"NumberNode.IsFloat", Field, 0, ""}, + {"NumberNode.IsInt", Field, 0, ""}, + {"NumberNode.IsUint", Field, 0, ""}, + {"NumberNode.NodeType", Field, 0, ""}, + {"NumberNode.Pos", Field, 1, ""}, + {"NumberNode.Text", Field, 0, ""}, + {"NumberNode.Uint64", Field, 0, ""}, + {"Parse", Func, 0, "func(name string, text string, leftDelim string, rightDelim string, funcs ...map[string]any) (map[string]*Tree, error)"}, + {"ParseComments", Const, 16, ""}, + {"PipeNode", Type, 0, ""}, + {"PipeNode.Cmds", Field, 0, ""}, + {"PipeNode.Decl", Field, 0, ""}, + {"PipeNode.IsAssign", Field, 11, ""}, + {"PipeNode.Line", Field, 0, ""}, + {"PipeNode.NodeType", Field, 0, ""}, + {"PipeNode.Pos", Field, 1, ""}, + {"Pos", Type, 1, ""}, + {"RangeNode", Type, 0, ""}, + {"RangeNode.BranchNode", Field, 0, ""}, + {"SkipFuncCheck", Const, 17, ""}, + {"StringNode", Type, 0, ""}, + {"StringNode.NodeType", Field, 0, ""}, + {"StringNode.Pos", Field, 1, ""}, + {"StringNode.Quoted", Field, 0, ""}, + {"StringNode.Text", Field, 0, ""}, + {"TemplateNode", Type, 0, ""}, + {"TemplateNode.Line", Field, 0, ""}, + {"TemplateNode.Name", Field, 0, ""}, + {"TemplateNode.NodeType", Field, 0, ""}, + {"TemplateNode.Pipe", Field, 0, ""}, + {"TemplateNode.Pos", Field, 1, ""}, + {"TextNode", Type, 0, ""}, + {"TextNode.NodeType", Field, 0, ""}, + {"TextNode.Pos", Field, 1, ""}, + {"TextNode.Text", Field, 0, ""}, + {"Tree", Type, 0, ""}, + {"Tree.Mode", Field, 16, ""}, + {"Tree.Name", Field, 0, ""}, + {"Tree.ParseName", Field, 1, ""}, + {"Tree.Root", Field, 0, ""}, + {"VariableNode", Type, 0, ""}, + {"VariableNode.Ident", Field, 0, ""}, + {"VariableNode.NodeType", Field, 0, ""}, + {"VariableNode.Pos", Field, 1, ""}, + {"WithNode", Type, 0, ""}, + {"WithNode.BranchNode", Field, 0, ""}, + }, + "time": { + {"(*Location).String", Method, 0, ""}, + {"(*ParseError).Error", Method, 0, ""}, + {"(*Ticker).Reset", Method, 
15, ""}, + {"(*Ticker).Stop", Method, 0, ""}, + {"(*Time).GobDecode", Method, 0, ""}, + {"(*Time).UnmarshalBinary", Method, 2, ""}, + {"(*Time).UnmarshalJSON", Method, 0, ""}, + {"(*Time).UnmarshalText", Method, 2, ""}, + {"(*Timer).Reset", Method, 1, ""}, + {"(*Timer).Stop", Method, 0, ""}, + {"(Duration).Abs", Method, 19, ""}, + {"(Duration).Hours", Method, 0, ""}, + {"(Duration).Microseconds", Method, 13, ""}, + {"(Duration).Milliseconds", Method, 13, ""}, + {"(Duration).Minutes", Method, 0, ""}, + {"(Duration).Nanoseconds", Method, 0, ""}, + {"(Duration).Round", Method, 9, ""}, + {"(Duration).Seconds", Method, 0, ""}, + {"(Duration).String", Method, 0, ""}, + {"(Duration).Truncate", Method, 9, ""}, + {"(Month).String", Method, 0, ""}, + {"(Time).Add", Method, 0, ""}, + {"(Time).AddDate", Method, 0, ""}, + {"(Time).After", Method, 0, ""}, + {"(Time).AppendBinary", Method, 24, ""}, + {"(Time).AppendFormat", Method, 5, ""}, + {"(Time).AppendText", Method, 24, ""}, + {"(Time).Before", Method, 0, ""}, + {"(Time).Clock", Method, 0, ""}, + {"(Time).Compare", Method, 20, ""}, + {"(Time).Date", Method, 0, ""}, + {"(Time).Day", Method, 0, ""}, + {"(Time).Equal", Method, 0, ""}, + {"(Time).Format", Method, 0, ""}, + {"(Time).GoString", Method, 17, ""}, + {"(Time).GobEncode", Method, 0, ""}, + {"(Time).Hour", Method, 0, ""}, + {"(Time).ISOWeek", Method, 0, ""}, + {"(Time).In", Method, 0, ""}, + {"(Time).IsDST", Method, 17, ""}, + {"(Time).IsZero", Method, 0, ""}, + {"(Time).Local", Method, 0, ""}, + {"(Time).Location", Method, 0, ""}, + {"(Time).MarshalBinary", Method, 2, ""}, + {"(Time).MarshalJSON", Method, 0, ""}, + {"(Time).MarshalText", Method, 2, ""}, + {"(Time).Minute", Method, 0, ""}, + {"(Time).Month", Method, 0, ""}, + {"(Time).Nanosecond", Method, 0, ""}, + {"(Time).Round", Method, 1, ""}, + {"(Time).Second", Method, 0, ""}, + {"(Time).String", Method, 0, ""}, + {"(Time).Sub", Method, 0, ""}, + {"(Time).Truncate", Method, 1, ""}, + {"(Time).UTC", Method, 0, ""}, + {"(Time).Unix", Method, 0, ""}, + {"(Time).UnixMicro", Method, 17, ""}, + {"(Time).UnixMilli", Method, 17, ""}, + {"(Time).UnixNano", Method, 0, ""}, + {"(Time).Weekday", Method, 0, ""}, + {"(Time).Year", Method, 0, ""}, + {"(Time).YearDay", Method, 1, ""}, + {"(Time).Zone", Method, 0, ""}, + {"(Time).ZoneBounds", Method, 19, ""}, + {"(Weekday).String", Method, 0, ""}, + {"ANSIC", Const, 0, ""}, + {"After", Func, 0, "func(d Duration) <-chan Time"}, + {"AfterFunc", Func, 0, "func(d Duration, f func()) *Timer"}, + {"April", Const, 0, ""}, + {"August", Const, 0, ""}, + {"Date", Func, 0, "func(year int, month Month, day int, hour int, min int, sec int, nsec int, loc *Location) Time"}, + {"DateOnly", Const, 20, ""}, + {"DateTime", Const, 20, ""}, + {"December", Const, 0, ""}, + {"Duration", Type, 0, ""}, + {"February", Const, 0, ""}, + {"FixedZone", Func, 0, "func(name string, offset int) *Location"}, + {"Friday", Const, 0, ""}, + {"Hour", Const, 0, ""}, + {"January", Const, 0, ""}, + {"July", Const, 0, ""}, + {"June", Const, 0, ""}, + {"Kitchen", Const, 0, ""}, + {"Layout", Const, 17, ""}, + {"LoadLocation", Func, 0, "func(name string) (*Location, error)"}, + {"LoadLocationFromTZData", Func, 10, "func(name string, data []byte) (*Location, error)"}, + {"Local", Var, 0, ""}, + {"Location", Type, 0, ""}, + {"March", Const, 0, ""}, + {"May", Const, 0, ""}, + {"Microsecond", Const, 0, ""}, + {"Millisecond", Const, 0, ""}, + {"Minute", Const, 0, ""}, + {"Monday", Const, 0, ""}, + {"Month", Type, 0, ""}, + {"Nanosecond", Const, 0, ""}, + 
{"NewTicker", Func, 0, "func(d Duration) *Ticker"}, + {"NewTimer", Func, 0, "func(d Duration) *Timer"}, + {"November", Const, 0, ""}, + {"Now", Func, 0, "func() Time"}, + {"October", Const, 0, ""}, + {"Parse", Func, 0, "func(layout string, value string) (Time, error)"}, + {"ParseDuration", Func, 0, "func(s string) (Duration, error)"}, + {"ParseError", Type, 0, ""}, + {"ParseError.Layout", Field, 0, ""}, + {"ParseError.LayoutElem", Field, 0, ""}, + {"ParseError.Message", Field, 0, ""}, + {"ParseError.Value", Field, 0, ""}, + {"ParseError.ValueElem", Field, 0, ""}, + {"ParseInLocation", Func, 1, "func(layout string, value string, loc *Location) (Time, error)"}, + {"RFC1123", Const, 0, ""}, + {"RFC1123Z", Const, 0, ""}, + {"RFC3339", Const, 0, ""}, + {"RFC3339Nano", Const, 0, ""}, + {"RFC822", Const, 0, ""}, + {"RFC822Z", Const, 0, ""}, + {"RFC850", Const, 0, ""}, + {"RubyDate", Const, 0, ""}, + {"Saturday", Const, 0, ""}, + {"Second", Const, 0, ""}, + {"September", Const, 0, ""}, + {"Since", Func, 0, "func(t Time) Duration"}, + {"Sleep", Func, 0, "func(d Duration)"}, + {"Stamp", Const, 0, ""}, + {"StampMicro", Const, 0, ""}, + {"StampMilli", Const, 0, ""}, + {"StampNano", Const, 0, ""}, + {"Sunday", Const, 0, ""}, + {"Thursday", Const, 0, ""}, + {"Tick", Func, 0, "func(d Duration) <-chan Time"}, + {"Ticker", Type, 0, ""}, + {"Ticker.C", Field, 0, ""}, + {"Time", Type, 0, ""}, + {"TimeOnly", Const, 20, ""}, + {"Timer", Type, 0, ""}, + {"Timer.C", Field, 0, ""}, + {"Tuesday", Const, 0, ""}, + {"UTC", Var, 0, ""}, + {"Unix", Func, 0, "func(sec int64, nsec int64) Time"}, + {"UnixDate", Const, 0, ""}, + {"UnixMicro", Func, 17, "func(usec int64) Time"}, + {"UnixMilli", Func, 17, "func(msec int64) Time"}, + {"Until", Func, 8, "func(t Time) Duration"}, + {"Wednesday", Const, 0, ""}, + {"Weekday", Type, 0, ""}, + }, + "unicode": { + {"(SpecialCase).ToLower", Method, 0, ""}, + {"(SpecialCase).ToTitle", Method, 0, ""}, + {"(SpecialCase).ToUpper", Method, 0, ""}, + {"ASCII_Hex_Digit", Var, 0, ""}, + {"Adlam", Var, 7, ""}, + {"Ahom", Var, 5, ""}, + {"Anatolian_Hieroglyphs", Var, 5, ""}, + {"Arabic", Var, 0, ""}, + {"Armenian", Var, 0, ""}, + {"Avestan", Var, 0, ""}, + {"AzeriCase", Var, 0, ""}, + {"Balinese", Var, 0, ""}, + {"Bamum", Var, 0, ""}, + {"Bassa_Vah", Var, 4, ""}, + {"Batak", Var, 0, ""}, + {"Bengali", Var, 0, ""}, + {"Bhaiksuki", Var, 7, ""}, + {"Bidi_Control", Var, 0, ""}, + {"Bopomofo", Var, 0, ""}, + {"Brahmi", Var, 0, ""}, + {"Braille", Var, 0, ""}, + {"Buginese", Var, 0, ""}, + {"Buhid", Var, 0, ""}, + {"C", Var, 0, ""}, + {"Canadian_Aboriginal", Var, 0, ""}, + {"Carian", Var, 0, ""}, + {"CaseRange", Type, 0, ""}, + {"CaseRange.Delta", Field, 0, ""}, + {"CaseRange.Hi", Field, 0, ""}, + {"CaseRange.Lo", Field, 0, ""}, + {"CaseRanges", Var, 0, ""}, + {"Categories", Var, 0, ""}, + {"Caucasian_Albanian", Var, 4, ""}, + {"Cc", Var, 0, ""}, + {"Cf", Var, 0, ""}, + {"Chakma", Var, 1, ""}, + {"Cham", Var, 0, ""}, + {"Cherokee", Var, 0, ""}, + {"Chorasmian", Var, 16, ""}, + {"Co", Var, 0, ""}, + {"Common", Var, 0, ""}, + {"Coptic", Var, 0, ""}, + {"Cs", Var, 0, ""}, + {"Cuneiform", Var, 0, ""}, + {"Cypriot", Var, 0, ""}, + {"Cypro_Minoan", Var, 21, ""}, + {"Cyrillic", Var, 0, ""}, + {"Dash", Var, 0, ""}, + {"Deprecated", Var, 0, ""}, + {"Deseret", Var, 0, ""}, + {"Devanagari", Var, 0, ""}, + {"Diacritic", Var, 0, ""}, + {"Digit", Var, 0, ""}, + {"Dives_Akuru", Var, 16, ""}, + {"Dogra", Var, 13, ""}, + {"Duployan", Var, 4, ""}, + {"Egyptian_Hieroglyphs", Var, 0, ""}, + {"Elbasan", Var, 4, ""}, + 
{"Elymaic", Var, 14, ""}, + {"Ethiopic", Var, 0, ""}, + {"Extender", Var, 0, ""}, + {"FoldCategory", Var, 0, ""}, + {"FoldScript", Var, 0, ""}, + {"Georgian", Var, 0, ""}, + {"Glagolitic", Var, 0, ""}, + {"Gothic", Var, 0, ""}, + {"Grantha", Var, 4, ""}, + {"GraphicRanges", Var, 0, ""}, + {"Greek", Var, 0, ""}, + {"Gujarati", Var, 0, ""}, + {"Gunjala_Gondi", Var, 13, ""}, + {"Gurmukhi", Var, 0, ""}, + {"Han", Var, 0, ""}, + {"Hangul", Var, 0, ""}, + {"Hanifi_Rohingya", Var, 13, ""}, + {"Hanunoo", Var, 0, ""}, + {"Hatran", Var, 5, ""}, + {"Hebrew", Var, 0, ""}, + {"Hex_Digit", Var, 0, ""}, + {"Hiragana", Var, 0, ""}, + {"Hyphen", Var, 0, ""}, + {"IDS_Binary_Operator", Var, 0, ""}, + {"IDS_Trinary_Operator", Var, 0, ""}, + {"Ideographic", Var, 0, ""}, + {"Imperial_Aramaic", Var, 0, ""}, + {"In", Func, 2, "func(r rune, ranges ...*RangeTable) bool"}, + {"Inherited", Var, 0, ""}, + {"Inscriptional_Pahlavi", Var, 0, ""}, + {"Inscriptional_Parthian", Var, 0, ""}, + {"Is", Func, 0, "func(rangeTab *RangeTable, r rune) bool"}, + {"IsControl", Func, 0, "func(r rune) bool"}, + {"IsDigit", Func, 0, "func(r rune) bool"}, + {"IsGraphic", Func, 0, "func(r rune) bool"}, + {"IsLetter", Func, 0, "func(r rune) bool"}, + {"IsLower", Func, 0, "func(r rune) bool"}, + {"IsMark", Func, 0, "func(r rune) bool"}, + {"IsNumber", Func, 0, "func(r rune) bool"}, + {"IsOneOf", Func, 0, "func(ranges []*RangeTable, r rune) bool"}, + {"IsPrint", Func, 0, "func(r rune) bool"}, + {"IsPunct", Func, 0, "func(r rune) bool"}, + {"IsSpace", Func, 0, "func(r rune) bool"}, + {"IsSymbol", Func, 0, "func(r rune) bool"}, + {"IsTitle", Func, 0, "func(r rune) bool"}, + {"IsUpper", Func, 0, "func(r rune) bool"}, + {"Javanese", Var, 0, ""}, + {"Join_Control", Var, 0, ""}, + {"Kaithi", Var, 0, ""}, + {"Kannada", Var, 0, ""}, + {"Katakana", Var, 0, ""}, + {"Kawi", Var, 21, ""}, + {"Kayah_Li", Var, 0, ""}, + {"Kharoshthi", Var, 0, ""}, + {"Khitan_Small_Script", Var, 16, ""}, + {"Khmer", Var, 0, ""}, + {"Khojki", Var, 4, ""}, + {"Khudawadi", Var, 4, ""}, + {"L", Var, 0, ""}, + {"Lao", Var, 0, ""}, + {"Latin", Var, 0, ""}, + {"Lepcha", Var, 0, ""}, + {"Letter", Var, 0, ""}, + {"Limbu", Var, 0, ""}, + {"Linear_A", Var, 4, ""}, + {"Linear_B", Var, 0, ""}, + {"Lisu", Var, 0, ""}, + {"Ll", Var, 0, ""}, + {"Lm", Var, 0, ""}, + {"Lo", Var, 0, ""}, + {"Logical_Order_Exception", Var, 0, ""}, + {"Lower", Var, 0, ""}, + {"LowerCase", Const, 0, ""}, + {"Lt", Var, 0, ""}, + {"Lu", Var, 0, ""}, + {"Lycian", Var, 0, ""}, + {"Lydian", Var, 0, ""}, + {"M", Var, 0, ""}, + {"Mahajani", Var, 4, ""}, + {"Makasar", Var, 13, ""}, + {"Malayalam", Var, 0, ""}, + {"Mandaic", Var, 0, ""}, + {"Manichaean", Var, 4, ""}, + {"Marchen", Var, 7, ""}, + {"Mark", Var, 0, ""}, + {"Masaram_Gondi", Var, 10, ""}, + {"MaxASCII", Const, 0, ""}, + {"MaxCase", Const, 0, ""}, + {"MaxLatin1", Const, 0, ""}, + {"MaxRune", Const, 0, ""}, + {"Mc", Var, 0, ""}, + {"Me", Var, 0, ""}, + {"Medefaidrin", Var, 13, ""}, + {"Meetei_Mayek", Var, 0, ""}, + {"Mende_Kikakui", Var, 4, ""}, + {"Meroitic_Cursive", Var, 1, ""}, + {"Meroitic_Hieroglyphs", Var, 1, ""}, + {"Miao", Var, 1, ""}, + {"Mn", Var, 0, ""}, + {"Modi", Var, 4, ""}, + {"Mongolian", Var, 0, ""}, + {"Mro", Var, 4, ""}, + {"Multani", Var, 5, ""}, + {"Myanmar", Var, 0, ""}, + {"N", Var, 0, ""}, + {"Nabataean", Var, 4, ""}, + {"Nag_Mundari", Var, 21, ""}, + {"Nandinagari", Var, 14, ""}, + {"Nd", Var, 0, ""}, + {"New_Tai_Lue", Var, 0, ""}, + {"Newa", Var, 7, ""}, + {"Nko", Var, 0, ""}, + {"Nl", Var, 0, ""}, + {"No", Var, 0, ""}, + 
{"Noncharacter_Code_Point", Var, 0, ""}, + {"Number", Var, 0, ""}, + {"Nushu", Var, 10, ""}, + {"Nyiakeng_Puachue_Hmong", Var, 14, ""}, + {"Ogham", Var, 0, ""}, + {"Ol_Chiki", Var, 0, ""}, + {"Old_Hungarian", Var, 5, ""}, + {"Old_Italic", Var, 0, ""}, + {"Old_North_Arabian", Var, 4, ""}, + {"Old_Permic", Var, 4, ""}, + {"Old_Persian", Var, 0, ""}, + {"Old_Sogdian", Var, 13, ""}, + {"Old_South_Arabian", Var, 0, ""}, + {"Old_Turkic", Var, 0, ""}, + {"Old_Uyghur", Var, 21, ""}, + {"Oriya", Var, 0, ""}, + {"Osage", Var, 7, ""}, + {"Osmanya", Var, 0, ""}, + {"Other", Var, 0, ""}, + {"Other_Alphabetic", Var, 0, ""}, + {"Other_Default_Ignorable_Code_Point", Var, 0, ""}, + {"Other_Grapheme_Extend", Var, 0, ""}, + {"Other_ID_Continue", Var, 0, ""}, + {"Other_ID_Start", Var, 0, ""}, + {"Other_Lowercase", Var, 0, ""}, + {"Other_Math", Var, 0, ""}, + {"Other_Uppercase", Var, 0, ""}, + {"P", Var, 0, ""}, + {"Pahawh_Hmong", Var, 4, ""}, + {"Palmyrene", Var, 4, ""}, + {"Pattern_Syntax", Var, 0, ""}, + {"Pattern_White_Space", Var, 0, ""}, + {"Pau_Cin_Hau", Var, 4, ""}, + {"Pc", Var, 0, ""}, + {"Pd", Var, 0, ""}, + {"Pe", Var, 0, ""}, + {"Pf", Var, 0, ""}, + {"Phags_Pa", Var, 0, ""}, + {"Phoenician", Var, 0, ""}, + {"Pi", Var, 0, ""}, + {"Po", Var, 0, ""}, + {"Prepended_Concatenation_Mark", Var, 7, ""}, + {"PrintRanges", Var, 0, ""}, + {"Properties", Var, 0, ""}, + {"Ps", Var, 0, ""}, + {"Psalter_Pahlavi", Var, 4, ""}, + {"Punct", Var, 0, ""}, + {"Quotation_Mark", Var, 0, ""}, + {"Radical", Var, 0, ""}, + {"Range16", Type, 0, ""}, + {"Range16.Hi", Field, 0, ""}, + {"Range16.Lo", Field, 0, ""}, + {"Range16.Stride", Field, 0, ""}, + {"Range32", Type, 0, ""}, + {"Range32.Hi", Field, 0, ""}, + {"Range32.Lo", Field, 0, ""}, + {"Range32.Stride", Field, 0, ""}, + {"RangeTable", Type, 0, ""}, + {"RangeTable.LatinOffset", Field, 1, ""}, + {"RangeTable.R16", Field, 0, ""}, + {"RangeTable.R32", Field, 0, ""}, + {"Regional_Indicator", Var, 10, ""}, + {"Rejang", Var, 0, ""}, + {"ReplacementChar", Const, 0, ""}, + {"Runic", Var, 0, ""}, + {"S", Var, 0, ""}, + {"STerm", Var, 0, ""}, + {"Samaritan", Var, 0, ""}, + {"Saurashtra", Var, 0, ""}, + {"Sc", Var, 0, ""}, + {"Scripts", Var, 0, ""}, + {"Sentence_Terminal", Var, 7, ""}, + {"Sharada", Var, 1, ""}, + {"Shavian", Var, 0, ""}, + {"Siddham", Var, 4, ""}, + {"SignWriting", Var, 5, ""}, + {"SimpleFold", Func, 0, "func(r rune) rune"}, + {"Sinhala", Var, 0, ""}, + {"Sk", Var, 0, ""}, + {"Sm", Var, 0, ""}, + {"So", Var, 0, ""}, + {"Soft_Dotted", Var, 0, ""}, + {"Sogdian", Var, 13, ""}, + {"Sora_Sompeng", Var, 1, ""}, + {"Soyombo", Var, 10, ""}, + {"Space", Var, 0, ""}, + {"SpecialCase", Type, 0, ""}, + {"Sundanese", Var, 0, ""}, + {"Syloti_Nagri", Var, 0, ""}, + {"Symbol", Var, 0, ""}, + {"Syriac", Var, 0, ""}, + {"Tagalog", Var, 0, ""}, + {"Tagbanwa", Var, 0, ""}, + {"Tai_Le", Var, 0, ""}, + {"Tai_Tham", Var, 0, ""}, + {"Tai_Viet", Var, 0, ""}, + {"Takri", Var, 1, ""}, + {"Tamil", Var, 0, ""}, + {"Tangsa", Var, 21, ""}, + {"Tangut", Var, 7, ""}, + {"Telugu", Var, 0, ""}, + {"Terminal_Punctuation", Var, 0, ""}, + {"Thaana", Var, 0, ""}, + {"Thai", Var, 0, ""}, + {"Tibetan", Var, 0, ""}, + {"Tifinagh", Var, 0, ""}, + {"Tirhuta", Var, 4, ""}, + {"Title", Var, 0, ""}, + {"TitleCase", Const, 0, ""}, + {"To", Func, 0, "func(_case int, r rune) rune"}, + {"ToLower", Func, 0, "func(r rune) rune"}, + {"ToTitle", Func, 0, "func(r rune) rune"}, + {"ToUpper", Func, 0, "func(r rune) rune"}, + {"Toto", Var, 21, ""}, + {"TurkishCase", Var, 0, ""}, + {"Ugaritic", Var, 0, ""}, + 
{"Unified_Ideograph", Var, 0, ""}, + {"Upper", Var, 0, ""}, + {"UpperCase", Const, 0, ""}, + {"UpperLower", Const, 0, ""}, + {"Vai", Var, 0, ""}, + {"Variation_Selector", Var, 0, ""}, + {"Version", Const, 0, ""}, + {"Vithkuqi", Var, 21, ""}, + {"Wancho", Var, 14, ""}, + {"Warang_Citi", Var, 4, ""}, + {"White_Space", Var, 0, ""}, + {"Yezidi", Var, 16, ""}, + {"Yi", Var, 0, ""}, + {"Z", Var, 0, ""}, + {"Zanabazar_Square", Var, 10, ""}, + {"Zl", Var, 0, ""}, + {"Zp", Var, 0, ""}, + {"Zs", Var, 0, ""}, + }, + "unicode/utf16": { + {"AppendRune", Func, 20, "func(a []uint16, r rune) []uint16"}, + {"Decode", Func, 0, "func(s []uint16) []rune"}, + {"DecodeRune", Func, 0, "func(r1 rune, r2 rune) rune"}, + {"Encode", Func, 0, "func(s []rune) []uint16"}, + {"EncodeRune", Func, 0, "func(r rune) (r1 rune, r2 rune)"}, + {"IsSurrogate", Func, 0, "func(r rune) bool"}, + {"RuneLen", Func, 23, "func(r rune) int"}, + }, + "unicode/utf8": { + {"AppendRune", Func, 18, "func(p []byte, r rune) []byte"}, + {"DecodeLastRune", Func, 0, "func(p []byte) (r rune, size int)"}, + {"DecodeLastRuneInString", Func, 0, "func(s string) (r rune, size int)"}, + {"DecodeRune", Func, 0, "func(p []byte) (r rune, size int)"}, + {"DecodeRuneInString", Func, 0, "func(s string) (r rune, size int)"}, + {"EncodeRune", Func, 0, "func(p []byte, r rune) int"}, + {"FullRune", Func, 0, "func(p []byte) bool"}, + {"FullRuneInString", Func, 0, "func(s string) bool"}, + {"MaxRune", Const, 0, ""}, + {"RuneCount", Func, 0, "func(p []byte) int"}, + {"RuneCountInString", Func, 0, "func(s string) (n int)"}, + {"RuneError", Const, 0, ""}, + {"RuneLen", Func, 0, "func(r rune) int"}, + {"RuneSelf", Const, 0, ""}, + {"RuneStart", Func, 0, "func(b byte) bool"}, + {"UTFMax", Const, 0, ""}, + {"Valid", Func, 0, "func(p []byte) bool"}, + {"ValidRune", Func, 1, "func(r rune) bool"}, + {"ValidString", Func, 0, "func(s string) bool"}, + }, + "unique": { + {"(Handle).Value", Method, 23, ""}, + {"Handle", Type, 23, ""}, + {"Make", Func, 23, "func[T comparable](value T) Handle[T]"}, + }, + "unsafe": { + {"Add", Func, 0, ""}, + {"Alignof", Func, 0, ""}, + {"Offsetof", Func, 0, ""}, + {"Pointer", Type, 0, ""}, + {"Sizeof", Func, 0, ""}, + {"Slice", Func, 0, ""}, + {"SliceData", Func, 0, ""}, + {"String", Func, 0, ""}, + {"StringData", Func, 0, ""}, + }, + "weak": { + {"(Pointer).Value", Method, 24, ""}, + {"Make", Func, 24, "func[T any](ptr *T) Pointer[T]"}, + {"Pointer", Type, 24, ""}, + }, +} diff --git a/openshift/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/openshift/vendor/golang.org/x/tools/internal/stdlib/stdlib.go new file mode 100644 index 0000000000..e223e0f340 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/stdlib/stdlib.go @@ -0,0 +1,105 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run generate.go + +// Package stdlib provides a table of all exported symbols in the +// standard library, along with the version at which they first +// appeared. It also provides the import graph of std packages. +package stdlib + +import ( + "fmt" + "strings" +) + +type Symbol struct { + Name string + Kind Kind + Version Version // Go version that first included the symbol + // Signature provides the type of a function (defined only for Kind=Func). + // Imported types are denoted as pkg.T; pkg is not fully qualified. + // TODO(adonovan): use an unambiguous encoding that is parseable. 
+ // + // Examples: + // func[M ~map[K]V, K comparable, V any](m M) M + // func(fi fs.FileInfo, link string) (*Header, error) + Signature string // if Kind == stdlib.Func +} + +// A Kind indicates the kind of a symbol: +// function, variable, constant, type, and so on. +type Kind int8 + +const ( + Invalid Kind = iota // Example name: + Type // "Buffer" + Func // "Println" + Var // "EOF" + Const // "Pi" + Field // "Point.X" + Method // "(*Buffer).Grow" +) + +func (kind Kind) String() string { + return [...]string{ + Invalid: "invalid", + Type: "type", + Func: "func", + Var: "var", + Const: "const", + Field: "field", + Method: "method", + }[kind] +} + +// A Version represents a version of Go of the form "go1.%d". +type Version int8 + +// String returns a version string of the form "go1.23", without allocating. +func (v Version) String() string { return versions[v] } + +var versions [30]string // (increase constant as needed) + +func init() { + for i := range versions { + versions[i] = fmt.Sprintf("go1.%d", i) + } +} + +// HasPackage reports whether the specified package path is part of +// the standard library's public API. +func HasPackage(path string) bool { + _, ok := PackageSymbols[path] + return ok +} + +// SplitField splits the field symbol name into type and field +// components. It must be called only on Field symbols. +// +// Example: "File.Package" -> ("File", "Package") +func (sym *Symbol) SplitField() (typename, name string) { + if sym.Kind != Field { + panic("not a field") + } + typename, name, _ = strings.Cut(sym.Name, ".") + return +} + +// SplitMethod splits the method symbol name into pointer, receiver, +// and method components. It must be called only on Method symbols. +// +// Example: "(*Buffer).Grow" -> (true, "Buffer", "Grow") +func (sym *Symbol) SplitMethod() (ptr bool, recv, name string) { + if sym.Kind != Method { + panic("not a method") + } + recv, name, _ = strings.Cut(sym.Name, ".") + recv = recv[len("(") : len(recv)-len(")")] + ptr = recv[0] == '*' + if ptr { + recv = recv[len("*"):] + } + return +} diff --git a/openshift/vendor/golang.org/x/tools/internal/typeparams/common.go b/openshift/vendor/golang.org/x/tools/internal/typeparams/common.go new file mode 100644 index 0000000000..cdae2b8e81 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -0,0 +1,68 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams contains common utilities for writing tools that +// interact with generic Go code, as introduced with Go 1.18. It +// supplements the standard library APIs. Notably, the StructuralTerms +// API computes a minimal representation of the structural +// restrictions on a type parameter. +// +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. +package typeparams + +import ( + "go/ast" + "go/token" + "go/types" +) + +// UnpackIndexExpr extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting indices slice will contain exactly one +// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable +// number of index expressions. +// +// For nodes that don't represent index expressions, the first return value of +// UnpackIndexExpr will be nil.
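+// For example, for a node parsed from "m[K, V]" (an ast.IndexListExpr), x is the expression m and indices holds the expressions K and V; for "s[i]" (an ast.IndexExpr), indices holds just i.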
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { + switch e := n.(type) { + case *ast.IndexExpr: + return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack + case *ast.IndexListExpr: + return e.X, e.Lbrack, e.Indices, e.Rbrack + } + return nil, token.NoPos, nil, token.NoPos +} + +// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on +// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 +// will panic. +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { + switch len(indices) { + case 0: + panic("empty indices") + case 1: + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: indices[0], + Rbrack: rbrack, + } + default: + return &ast.IndexListExpr{ + X: x, + Lbrack: lbrack, + Indices: indices, + Rbrack: rbrack, + } + } +} + +// IsTypeParam reports whether t is a type parameter (or an alias of one). +func IsTypeParam(t types.Type) bool { + _, ok := types.Unalias(t).(*types.TypeParam) + return ok +} diff --git a/openshift/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/openshift/vendor/golang.org/x/tools/internal/typeparams/coretype.go new file mode 100644 index 0000000000..27a2b17929 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -0,0 +1,155 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "fmt" + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. +func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types. + } + + terms, err := NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) == 0 => the interface has an empty type set. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. + ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction.
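+ // For example, one is chan<- E and the other is <-chan E, so no single channel type satisfies both and T has no core type.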
+ return nil + } + } + return ch +} + +// NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, NormalTerms returns ErrEmptyTypeSet. +// +// NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func NormalTerms(T types.Type) ([]*types.Term, error) { + // typeSetOf(T) == typeSetOf(Unalias(T)) + typ := types.Unalias(T) + if named, ok := typ.(*types.Named); ok { + typ = named.Underlying() + } + switch typ := typ.(type) { + case *types.TypeParam: + return StructuralTerms(typ) + case *types.Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*types.Term{types.NewTerm(false, T)}, nil + } +} + +// Deref returns the type of the variable pointed to by t, +// if t's core type is a pointer; otherwise it returns t. +// +// Do not assume that Deref(T)==T implies T is not a pointer: +// consider "type T *T", for example. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. +func Deref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} + +// MustDeref returns the type of the variable pointed to by t. +// It panics if t's core type is not a pointer. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. 
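+//
+// Illustrative example: for t = *bytes.Buffer, MustDeref(t) is
+// bytes.Buffer; for a type parameter whose core type is *int, it is
+// int; for t = int, MustDeref panics.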
+func MustDeref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + panic(fmt.Sprintf("%v is not a pointer", t)) +} diff --git a/openshift/vendor/golang.org/x/tools/internal/typeparams/free.go b/openshift/vendor/golang.org/x/tools/internal/typeparams/free.go new file mode 100644 index 0000000000..709d2fc144 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/typeparams/free.go @@ -0,0 +1,131 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) + +// Free is a memoization of the set of free type parameters within a +// type. It makes a sequence of calls to [Free.Has] for overlapping +// types more efficient. The zero value is ready for use. +// +// NOTE: Adapted from go/types/infer.go. If it is later exported, factor. +type Free struct { + seen map[types.Type]bool +} + +// Has reports whether the specified type has a free type parameter. +func (w *Free) Has(typ types.Type) (res bool) { + // detect cycles + if x, ok := w.seen[typ]; ok { + return x + } + if w.seen == nil { + w.seen = make(map[types.Type]bool) + } + w.seen[typ] = false + defer func() { + w.seen[typ] = res + }() + + switch t := typ.(type) { + case nil, *types.Basic: // TODO(gri) should nil be handled here? + break + + case *types.Alias: + if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() { + return true // This is an uninstantiated Alias. + } + // The expansion of an alias can have free type parameters, + // whether or not the alias itself has type parameters: + // + // func _[K comparable]() { + // type Set = map[K]bool // free(Set) = {K} + // type MapTo[V] = map[K]V // free(Map[foo]) = {V} + // } + // + // So, we must Unalias. + return w.Has(types.Unalias(t)) + + case *types.Array: + return w.Has(t.Elem()) + + case *types.Slice: + return w.Has(t.Elem()) + + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + if w.Has(t.Field(i).Type()) { + return true + } + } + + case *types.Pointer: + return w.Has(t.Elem()) + + case *types.Tuple: + n := t.Len() + for i := range n { + if w.Has(t.At(i).Type()) { + return true + } + } + + case *types.Signature: + // t.tparams may not be nil if we are looking at a signature + // of a generic function type (or an interface method) that is + // part of the type we're testing. We don't care about these type + // parameters. + // Similarly, the receiver of a method may declare (rather than + // use) type parameters, we don't care about those either. + // Thus, we only need to look at the input and result parameters. + return w.Has(t.Params()) || w.Has(t.Results()) + + case *types.Interface: + for i, n := 0, t.NumMethods(); i < n; i++ { + if w.Has(t.Method(i).Type()) { + return true + } + } + terms, err := InterfaceTermSet(t) + if err != nil { + return false // ill typed + } + for _, term := range terms { + if w.Has(term.Type()) { + return true + } + } + + case *types.Map: + return w.Has(t.Key()) || w.Has(t.Elem()) + + case *types.Chan: + return w.Has(t.Elem()) + + case *types.Named: + args := t.TypeArgs() + if params := t.TypeParams(); params.Len() > args.Len() { + return true // this is an uninstantiated named type. 
+ } + for i, n := 0, args.Len(); i < n; i++ { + if w.Has(args.At(i)) { + return true + } + } + return w.Has(t.Underlying()) // recurse for types local to parameterized functions + + case *types.TypeParam: + return true + + default: + panic(t) // unreachable + } + + return false +} diff --git a/openshift/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/openshift/vendor/golang.org/x/tools/internal/typeparams/normalize.go new file mode 100644 index 0000000000..f49802b8ef --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -0,0 +1,218 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" + "os" + "strings" +) + +//go:generate go run copytermlist.go + +const debug = false + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// +// type T[P interface{~int; m()}] int +// +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. 
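+//
+// Illustrative example, reusing the declarations from the
+// StructuralTerms documentation above: for the constraint interface
+// interface{ A|B; C }, InterfaceTermSet returns the two terms
+// ~string and int.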
+// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func UnionTermSet(union *types.Union) ([]*types.Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*types.Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*types.Term + for _, term := range tset.terms { + terms = append(terms, types.NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +func indentf(depth int, format string, args ...any) { + fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) +} + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + panic("nil type") + } + + if debug { + indentf(depth, "%s", t.String()) + defer func() { + if err != nil { + indentf(depth, "=> %s", err) + } else { + indentf(depth, "=> %s", res.terms.String()) + } + }() + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. + tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. + tset.terms = allTermlist + for i := 0; i < u.NumEmbeddeds(); i++ { + embedded := u.EmbeddedType(i) + if _, ok := embedded.Underlying().(*types.TypeParam); ok { + return nil, fmt.Errorf("invalid embedded type %T", embedded) + } + tset2, err := computeTermSetInternal(embedded, seen, depth+1) + if err != nil { + return nil, err + } + tset.terms = tset.terms.intersect(tset2.terms) + } + case *types.Union: + // The term set of a union is the union of term sets of its terms. + tset.terms = nil + for i := 0; i < u.Len(); i++ { + t := u.Term(i) + var terms termlist + switch t.Type().Underlying().(type) { + case *types.Interface: + tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) + if err != nil { + return nil, err + } + terms = tset2.terms + case *types.TypeParam, *types.Union: + // A stand-alone type parameter or union is not permitted as union + // term. 
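+				// Illustrative example: interface{ P | int },
+				// where P is a type parameter, is rejected here.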
+ return nil, fmt.Errorf("invalid union term %T", t) + default: + if t.Type() == types.Typ[types.Invalid] { + continue + } + terms = termlist{{t.Tilde(), t.Type()}} + } + tset.terms = tset.terms.union(terms) + if len(tset.terms) > maxTermCount { + return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) + } + } + case *types.TypeParam: + panic("unreachable") + default: + // For all other types, the term set is just a single non-tilde term + // holding the type itself. + if u != types.Typ[types.Invalid] { + tset.terms = termlist{{false, t}} + } + } + return tset, nil +} + +// under is a facade for the go/types internal function of the same name. It is +// used by typeterm.go. +func under(t types.Type) types.Type { + return t.Underlying() +} diff --git a/openshift/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/openshift/vendor/golang.org/x/tools/internal/typeparams/termlist.go new file mode 100644 index 0000000000..9bc29143f6 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -0,0 +1,169 @@ +// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT. +// Source: ../../cmd/compile/internal/types2/termlist.go + +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import ( + "go/types" + "strings" +) + +// A termlist represents the type set represented by the union +// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. +// A termlist is in normal form if all terms are disjoint. +// termlist operations don't require the operands to be in +// normal form. +type termlist []*term + +// allTermlist represents the set of all types. +// It is in normal form. +var allTermlist = termlist{new(term)} + +// termSep is the separator used between individual terms. +const termSep = " | " + +// String prints the termlist exactly (without normalization). +func (xl termlist) String() string { + if len(xl) == 0 { + return "∅" + } + var buf strings.Builder + for i, x := range xl { + if i > 0 { + buf.WriteString(termSep) + } + buf.WriteString(x.String()) + } + return buf.String() +} + +// isEmpty reports whether the termlist xl represents the empty set of types. +func (xl termlist) isEmpty() bool { + // If there's a non-nil term, the entire list is not empty. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil { + return false + } + } + return true +} + +// isAll reports whether the termlist xl represents the set of all types. +func (xl termlist) isAll() bool { + // If there's a 𝓤 term, the entire list is 𝓤. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil && x.typ == nil { + return true + } + } + return false +} + +// norm returns the normal form of xl. +func (xl termlist) norm() termlist { + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + used := make([]bool, len(xl)) + var rl termlist + for i, xi := range xl { + if xi == nil || used[i] { + continue + } + for j := i + 1; j < len(xl); j++ { + xj := xl[j] + if xj == nil || used[j] { + continue + } + if u1, u2 := xi.union(xj); u2 == nil { + // If we encounter a 𝓤 term, the entire list is 𝓤. + // Exit early. 
+				// (Note that this is not just an optimization;
+				// if we continue, we may end up with a 𝓤 term
+				// and other terms and the result would not be
+				// in normal form.)
+				if u1.typ == nil {
+					return allTermlist
+				}
+				xi = u1
+				used[j] = true // xj is now unioned into xi - ignore it in future iterations
+			}
+		}
+		rl = append(rl, xi)
+	}
+	return rl
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+	return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+	if xl.isEmpty() || yl.isEmpty() {
+		return nil
+	}
+
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	var rl termlist
+	for _, x := range xl {
+		for _, y := range yl {
+			if r := x.intersect(y); r != nil {
+				rl = append(rl, r)
+			}
+		}
+	}
+	return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+	// TODO(gri) this should be more efficient
+	return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+	for _, x := range xl {
+		if x.includes(t) {
+			return true
+		}
+	}
+	return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+	for _, x := range xl {
+		if y.subsetOf(x) {
+			return true
+		}
+	}
+	return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+	if yl.isEmpty() {
+		return xl.isEmpty()
+	}
+
+	// each term x of xl must be a subset of yl
+	for _, x := range xl {
+		if !yl.supersetOf(x) {
+			return false // x is not a subset of yl
+		}
+	}
+	return true
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/openshift/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 0000000000..fa758cdc98
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,172 @@
+// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
+// Source: ../../cmd/compile/internal/types2/typeterm.go
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+//	 ∅: (*term)(nil)     == ∅                      // set of no types (empty set)
+//	 𝓤: &term{}          == 𝓤                      // set of all types (𝓤niverse)
+//	 T: &term{false, T}  == {T}                    // set of type T
+//	~t: &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t
+type term struct {
+	tilde bool // valid if typ != nil
+	typ   types.Type
+}
+
+func (x *term) String() string {
+	switch {
+	case x == nil:
+		return "∅"
+	case x.typ == nil:
+		return "𝓤"
+	case x.tilde:
+		return "~" + x.typ.String()
+	default:
+		return x.typ.String()
+	}
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return x == y
+	case x.typ == nil || y.typ == nil:
+		return x.typ == y.typ
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
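+//
+// Illustrative examples: int ∪ ~int == ~int (a single term), while
+// int ∪ string == (int, string) (two terms, since the sets are
+// disjoint).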
+func (x *term) union(y *term) (_, _ *term) {
+	// easy cases
+	switch {
+	case x == nil && y == nil:
+		return nil, nil // ∅ ∪ ∅ == ∅
+	case x == nil:
+		return y, nil // ∅ ∪ y == y
+	case y == nil:
+		return x, nil // x ∪ ∅ == x
+	case x.typ == nil:
+		return x, nil // 𝓤 ∪ y == 𝓤
+	case y.typ == nil:
+		return y, nil // x ∪ 𝓤 == 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∪ ~t == ~t
+	// ~t ∪  T == ~t
+	//  T ∪ ~t == ~t
+	//  T ∪  T ==  T
+	if x.tilde || !y.tilde {
+		return x, nil
+	}
+	return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+	case x.typ == nil:
+		return y // 𝓤 ∩ y == y
+	case y.typ == nil:
+		return x // x ∩ 𝓤 == x
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return nil // x ∩ y == ∅ if x, y are disjoint
+	}
+	// x.typ == y.typ
+
+	// ~t ∩ ~t == ~t
+	// ~t ∩  T ==  T
+	//  T ∩ ~t ==  T
+	//  T ∩  T ==  T
+	if !x.tilde || y.tilde {
+		return x
+	}
+	return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+	// easy cases
+	switch {
+	case x == nil:
+		return false // t ∈ ∅ == false
+	case x.typ == nil:
+		return true // t ∈ 𝓤 == true
+	}
+	// ∅ ⊂ x ⊂ 𝓤
+
+	u := t
+	if x.tilde {
+		u = under(u)
+	}
+	return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil:
+		return true // ∅ ⊆ y == true
+	case y == nil:
+		return false // x ⊆ ∅ == false since x != ∅
+	case y.typ == nil:
+		return true // x ⊆ 𝓤 == true
+	case x.typ == nil:
+		return false // 𝓤 ⊆ y == false since y != 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return false // x ⊆ y == false if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ⊆ ~t == true
+	// ~t ⊆  T == false
+	//  T ⊆ ~t == true
+	//  T ⊆  T == true
+	return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+	if debug && (x.typ == nil || y.typ == nil) {
+		panic("invalid argument(s)")
+	}
+	ux := x.typ
+	if y.tilde {
+		ux = under(ux)
+	}
+	uy := y.typ
+	if x.tilde {
+		uy = under(uy)
+	}
+	return !types.Identical(ux, uy)
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/openshift/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
new file mode 100644
index 0000000000..3db2a135b9
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
@@ -0,0 +1,137 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+	"fmt"
+	"go/ast"
+	"go/types"
+	_ "unsafe"
+)
+
+// CallKind describes the function position of an [*ast.CallExpr].
+type CallKind int
+
+const (
+	CallStatic     CallKind = iota // static call to known function
+	CallInterface                  // dynamic call through an interface method
+	CallDynamic                    // dynamic call of a func value
+	CallBuiltin                    // call to a builtin function
+	CallConversion                 // a conversion (not a call)
+)
+
+var callKindNames = []string{
+	"CallStatic",
+	"CallInterface",
+	"CallDynamic",
+	"CallBuiltin",
+	"CallConversion",
+}
+
+func (k CallKind) String() string {
+	if i := int(k); i >= 0 && i < len(callKindNames) {
+		return callKindNames[i]
+	}
+	return fmt.Sprintf("typeutil.CallKind(%d)", k)
+}
+
+// ClassifyCall classifies the function position of a call expression ([*ast.CallExpr]).
+// It distinguishes among true function calls, calls to builtins, and type conversions,
+// and further classifies function calls as static calls (where the function is known),
+// dynamic interface calls, and other dynamic calls.
+//
+// For the declarations:
+//
+//	func f() {}
+//	func g[T any]() {}
+//	var v func()
+//	var s []func()
+//	type I interface { M() }
+//	var i I
+//
+// ClassifyCall returns the following:
+//
+//	f()           CallStatic
+//	g[int]()      CallStatic
+//	i.M()         CallInterface
+//	min(1, 2)     CallBuiltin
+//	v()           CallDynamic
+//	s[0]()        CallDynamic
+//	int(x)        CallConversion
+//	[]byte("")    CallConversion
+func ClassifyCall(info *types.Info, call *ast.CallExpr) CallKind {
+	if info.Types == nil {
+		panic("ClassifyCall: info.Types is nil")
+	}
+	tv := info.Types[call.Fun]
+	if tv.IsType() {
+		return CallConversion
+	}
+	if tv.IsBuiltin() {
+		return CallBuiltin
+	}
+	obj := info.Uses[UsedIdent(info, call.Fun)]
+	// Classify the call by the type of the object, if any.
+	switch obj := obj.(type) {
+	case *types.Func:
+		if interfaceMethod(obj) {
+			return CallInterface
+		}
+		return CallStatic
+	default:
+		return CallDynamic
+	}
+}
+
+// UsedIdent returns the identifier such that info.Uses[UsedIdent(info, e)]
+// is the [types.Object] used by e, if any.
+//
+// If e is one of various forms of reference:
+//
+//	f, c, v, T           lexical reference
+//	pkg.X                qualified identifier
+//	f[T] or pkg.F[K,V]   instantiations of the above kinds
+//	expr.f               field or method value selector
+//	T.f                  method expression selector
+//
+// UsedIdent returns the identifier whose associated value in [types.Info.Uses]
+// is the object to which it refers.
+//
+// For the declarations:
+//
+//	func F[T any]() {...}
+//	type I interface { M() }
+//	var (
+//		x int
+//		s struct { f int }
+//		a []int
+//		i I
+//	)
+//
+// UsedIdent returns the following:
+//
+//	Expr          UsedIdent
+//	x             x
+//	s.f           f
+//	F[int]        F
+//	i.M           M
+//	I.M           M
+//	min           min
+//	int           int
+//	1             nil
+//	a[0]          nil
+//	[]byte        nil
+//
+// Note: if e is an instantiated function or method, UsedIdent returns
+// the corresponding generic function or method on the generic type.
+func UsedIdent(info *types.Info, e ast.Expr) *ast.Ident {
+	return usedIdent(info, e)
+}
+
+//go:linkname usedIdent golang.org/x/tools/go/types/typeutil.usedIdent
+func usedIdent(info *types.Info, e ast.Expr) *ast.Ident
+
+//go:linkname interfaceMethod golang.org/x/tools/go/types/typeutil.interfaceMethod
+func interfaceMethod(f *types.Func) bool
diff --git a/openshift/vendor/golang.org/x/tools/internal/typesinternal/element.go b/openshift/vendor/golang.org/x/tools/internal/typesinternal/element.go
new file mode 100644
index 0000000000..4957f02164
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/typesinternal/element.go
@@ -0,0 +1,133 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "fmt" + "go/types" + + "golang.org/x/tools/go/types/typeutil" +) + +// ForEachElement calls f for type T and each type reachable from its +// type through reflection. It does this by recursively stripping off +// type constructors; in addition, for each named type N, the type *N +// is added to the result as it may have additional methods. +// +// The caller must provide an initially empty set used to de-duplicate +// identical types, potentially across multiple calls to ForEachElement. +// (Its final value holds all the elements seen, matching the arguments +// passed to f.) +// +// TODO(adonovan): share/harmonize with go/callgraph/rta. +func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) { + var visit func(T types.Type, skip bool) + visit = func(T types.Type, skip bool) { + if !skip { + if seen, _ := rtypes.Set(T, true).(bool); seen { + return // de-dup + } + + f(T) // notify caller of new element type + } + + // Recursion over signatures of each method. + tmset := msets.MethodSet(T) + for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + // It is tempting to call visit(sig, false) + // but, as noted in golang.org/cl/65450043, + // the Signature.Recv field is ignored by + // types.Identical and typeutil.Map, which + // is confusing at best. + // + // More importantly, the true signature rtype + // reachable from a method using reflection + // has no receiver but an extra ordinary parameter. + // For the Read method of io.Reader we want: + // func(Reader, []byte) (int, error) + // but here sig is: + // func([]byte) (int, error) + // with .Recv = Reader (though it is hard to + // notice because it doesn't affect Signature.String + // or types.Identical). + // + // TODO(adonovan): construct and visit the correct + // non-method signature with an extra parameter + // (though since unnamed func types have no methods + // there is essentially no actual demand for this). + // + // TODO(adonovan): document whether or not it is + // safe to skip non-exported methods (as RTA does). + visit(sig.Params(), true) // skip the Tuple + visit(sig.Results(), true) // skip the Tuple + } + + switch T := T.(type) { + case *types.Alias: + visit(types.Unalias(T), skip) // emulates the pre-Alias behavior + + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. + + case *types.Pointer: + visit(T.Elem(), false) + + case *types.Slice: + visit(T.Elem(), false) + + case *types.Chan: + visit(T.Elem(), false) + + case *types.Map: + visit(T.Key(), false) + visit(T.Elem(), false) + + case *types.Signature: + if T.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv())) + } + visit(T.Params(), true) // skip the Tuple + visit(T.Results(), true) // skip the Tuple + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + visit(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. 
+ visit(T.Underlying(), true) // skip the unnamed type + + case *types.Array: + visit(T.Elem(), false) + + case *types.Struct: + for i, n := 0, T.NumFields(); i < n; i++ { + // TODO(adonovan): document whether or not + // it is safe to skip non-exported fields. + visit(T.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, T.Len(); i < n; i++ { + visit(T.At(i).Type(), false) + } + + case *types.TypeParam, *types.Union: + // forEachReachable must not be called on parameterized types. + panic(T) + + default: + panic(T) + } + } + visit(T, false) +} diff --git a/openshift/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/openshift/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go new file mode 100644 index 0000000000..235a6defc4 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -0,0 +1,1560 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +//go:generate stringer -type=ErrorCode + +type ErrorCode int + +// This file defines the error codes that can be produced during type-checking. +// Collectively, these codes provide an identifier that may be used to +// implement special handling for certain types of errors. +// +// Error codes should be fine-grained enough that the exact nature of the error +// can be easily determined, but coarse enough that they are not an +// implementation detail of the type checking algorithm. As a rule-of-thumb, +// errors should be considered equivalent if there is a theoretical refactoring +// of the type checker in which they are emitted in exactly one place. For +// example, the type checker emits different error messages for "too many +// arguments" and "too few arguments", but one can imagine an alternative type +// checker where this check instead just emits a single "wrong number of +// arguments", so these errors should have the same code. +// +// Error code names should be as brief as possible while retaining accuracy and +// distinctiveness. In most cases names should start with an adjective +// describing the nature of the error (e.g. "invalid", "unused", "misplaced"), +// and end with a noun identifying the relevant language object. For example, +// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the +// convention that "bad" implies a problem with syntax, and "invalid" implies a +// problem with types. + +const ( + // InvalidSyntaxTree occurs if an invalid syntax tree is provided + // to the type checker. It should never happen. + InvalidSyntaxTree ErrorCode = -1 +) + +const ( + _ ErrorCode = iota + + // Test is reserved for errors that only apply while in self-test mode. + Test + + /* package names */ + + // BlankPkgName occurs when a package name is the blank identifier "_". + // + // Per the spec: + // "The PackageName must not be the blank identifier." + BlankPkgName + + // MismatchedPkgName occurs when a file's package name doesn't match the + // package name already established by other files. + MismatchedPkgName + + // InvalidPkgUse occurs when a package identifier is used outside of a + // selector expression. + // + // Example: + // import "fmt" + // + // var _ = fmt + InvalidPkgUse + + /* imports */ + + // BadImportPath occurs when an import path is not valid. + BadImportPath + + // BrokenImport occurs when importing a package fails. 
+ // + // Example: + // import "amissingpackage" + BrokenImport + + // ImportCRenamed occurs when the special import "C" is renamed. "C" is a + // pseudo-package, and must not be renamed. + // + // Example: + // import _ "C" + ImportCRenamed + + // UnusedImport occurs when an import is unused. + // + // Example: + // import "fmt" + // + // func main() {} + UnusedImport + + /* initialization */ + + // InvalidInitCycle occurs when an invalid cycle is detected within the + // initialization graph. + // + // Example: + // var x int = f() + // + // func f() int { return x } + InvalidInitCycle + + /* decls */ + + // DuplicateDecl occurs when an identifier is declared multiple times. + // + // Example: + // var x = 1 + // var x = 2 + DuplicateDecl + + // InvalidDeclCycle occurs when a declaration cycle is not valid. + // + // Example: + // import "unsafe" + // + // type T struct { + // a [n]int + // } + // + // var n = unsafe.Sizeof(T{}) + InvalidDeclCycle + + // InvalidTypeCycle occurs when a cycle in type definitions results in a + // type that is not well-defined. + // + // Example: + // import "unsafe" + // + // type T [unsafe.Sizeof(T{})]int + InvalidTypeCycle + + /* decls > const */ + + // InvalidConstInit occurs when a const declaration has a non-constant + // initializer. + // + // Example: + // var x int + // const _ = x + InvalidConstInit + + // InvalidConstVal occurs when a const value cannot be converted to its + // target type. + // + // TODO(findleyr): this error code and example are not very clear. Consider + // removing it. + // + // Example: + // const _ = 1 << "hello" + InvalidConstVal + + // InvalidConstType occurs when the underlying type in a const declaration + // is not a valid constant type. + // + // Example: + // const c *int = 4 + InvalidConstType + + /* decls > var (+ other variable assignment codes) */ + + // UntypedNilUse occurs when the predeclared (untyped) value nil is used to + // initialize a variable declared without an explicit type. + // + // Example: + // var x = nil + UntypedNilUse + + // WrongAssignCount occurs when the number of values on the right-hand side + // of an assignment or initialization expression does not match the number + // of variables on the left-hand side. + // + // Example: + // var x = 1, 2 + WrongAssignCount + + // UnassignableOperand occurs when the left-hand side of an assignment is + // not assignable. + // + // Example: + // func f() { + // const c = 1 + // c = 2 + // } + UnassignableOperand + + // NoNewVar occurs when a short variable declaration (':=') does not declare + // new variables. + // + // Example: + // func f() { + // x := 1 + // x := 2 + // } + NoNewVar + + // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does + // not have single-valued left-hand or right-hand side. + // + // Per the spec: + // "In assignment operations, both the left- and right-hand expression lists + // must contain exactly one single-valued expression" + // + // Example: + // func f() int { + // x, y := 1, 2 + // x, y += 1 + // return x + y + // } + MultiValAssignOp + + // InvalidIfaceAssign occurs when a value of type T is used as an + // interface, but T does not implement a method of the expected interface. + // + // Example: + // type I interface { + // f() + // } + // + // type T int + // + // var x I = T(1) + InvalidIfaceAssign + + // InvalidChanAssign occurs when a chan assignment is invalid. 
+ // + // Per the spec, a value x is assignable to a channel type T if: + // "x is a bidirectional channel value, T is a channel type, x's type V and + // T have identical element types, and at least one of V or T is not a + // defined type." + // + // Example: + // type T1 chan int + // type T2 chan int + // + // var x T1 + // // Invalid assignment because both types are named + // var _ T2 = x + InvalidChanAssign + + // IncompatibleAssign occurs when the type of the right-hand side expression + // in an assignment cannot be assigned to the type of the variable being + // assigned. + // + // Example: + // var x []int + // var _ int = x + IncompatibleAssign + + // UnaddressableFieldAssign occurs when trying to assign to a struct field + // in a map value. + // + // Example: + // func f() { + // m := make(map[string]struct{i int}) + // m["foo"].i = 42 + // } + UnaddressableFieldAssign + + /* decls > type (+ other type expression codes) */ + + // NotAType occurs when the identifier used as the underlying type in a type + // declaration or the right-hand side of a type alias does not denote a type. + // + // Example: + // var S = 2 + // + // type T S + NotAType + + // InvalidArrayLen occurs when an array length is not a constant value. + // + // Example: + // var n = 3 + // var _ = [n]int{} + InvalidArrayLen + + // BlankIfaceMethod occurs when a method name is '_'. + // + // Per the spec: + // "The name of each explicitly specified method must be unique and not + // blank." + // + // Example: + // type T interface { + // _(int) + // } + BlankIfaceMethod + + // IncomparableMapKey occurs when a map key type does not support the == and + // != operators. + // + // Per the spec: + // "The comparison operators == and != must be fully defined for operands of + // the key type; thus the key type must not be a function, map, or slice." + // + // Example: + // var x map[T]int + // + // type T []int + IncomparableMapKey + + // InvalidIfaceEmbed occurs when a non-interface type is embedded in an + // interface. + // + // Example: + // type T struct {} + // + // func (T) m() + // + // type I interface { + // T + // } + InvalidIfaceEmbed + + // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T, + // and T itself is itself a pointer, an unsafe.Pointer, or an interface. + // + // Per the spec: + // "An embedded field must be specified as a type name T or as a pointer to + // a non-interface type name *T, and T itself may not be a pointer type." + // + // Example: + // type T *int + // + // type S struct { + // *T + // } + InvalidPtrEmbed + + /* decls > func and method */ + + // BadRecv occurs when a method declaration does not have exactly one + // receiver parameter. + // + // Example: + // func () _() {} + BadRecv + + // InvalidRecv occurs when a receiver type expression is not of the form T + // or *T, or T is a pointer type. + // + // Example: + // type T struct {} + // + // func (**T) m() {} + InvalidRecv + + // DuplicateFieldAndMethod occurs when an identifier appears as both a field + // and method name. + // + // Example: + // type T struct { + // m int + // } + // + // func (T) m() {} + DuplicateFieldAndMethod + + // DuplicateMethod occurs when two methods on the same receiver type have + // the same name. + // + // Example: + // type T struct {} + // func (T) m() {} + // func (T) m(i int) int { return i } + DuplicateMethod + + /* decls > special */ + + // InvalidBlank occurs when a blank identifier is used as a value or type. 
+ // + // Per the spec: + // "The blank identifier may appear as an operand only on the left-hand side + // of an assignment." + // + // Example: + // var x = _ + InvalidBlank + + // InvalidIota occurs when the predeclared identifier iota is used outside + // of a constant declaration. + // + // Example: + // var x = iota + InvalidIota + + // MissingInitBody occurs when an init function is missing its body. + // + // Example: + // func init() + MissingInitBody + + // InvalidInitSig occurs when an init function declares parameters or + // results. + // + // Example: + // func init() int { return 1 } + InvalidInitSig + + // InvalidInitDecl occurs when init is declared as anything other than a + // function. + // + // Example: + // var init = 1 + InvalidInitDecl + + // InvalidMainDecl occurs when main is declared as anything other than a + // function, in a main package. + InvalidMainDecl + + /* exprs */ + + // TooManyValues occurs when a function returns too many values for the + // expression context in which it is used. + // + // Example: + // func ReturnTwo() (int, int) { + // return 1, 2 + // } + // + // var x = ReturnTwo() + TooManyValues + + // NotAnExpr occurs when a type expression is used where a value expression + // is expected. + // + // Example: + // type T struct {} + // + // func f() { + // T + // } + NotAnExpr + + /* exprs > const */ + + // TruncatedFloat occurs when a float constant is truncated to an integer + // value. + // + // Example: + // var _ int = 98.6 + TruncatedFloat + + // NumericOverflow occurs when a numeric constant overflows its target type. + // + // Example: + // var x int8 = 1000 + NumericOverflow + + /* exprs > operation */ + + // UndefinedOp occurs when an operator is not defined for the type(s) used + // in an operation. + // + // Example: + // var c = "a" - "b" + UndefinedOp + + // MismatchedTypes occurs when operand types are incompatible in a binary + // operation. + // + // Example: + // var a = "hello" + // var b = 1 + // var c = a - b + MismatchedTypes + + // DivByZero occurs when a division operation is provable at compile + // time to be a division by zero. + // + // Example: + // const divisor = 0 + // var x int = 1/divisor + DivByZero + + // NonNumericIncDec occurs when an increment or decrement operator is + // applied to a non-numeric value. + // + // Example: + // func f() { + // var c = "c" + // c++ + // } + NonNumericIncDec + + /* exprs > ptr */ + + // UnaddressableOperand occurs when the & operator is applied to an + // unaddressable expression. + // + // Example: + // var x = &1 + UnaddressableOperand + + // InvalidIndirection occurs when a non-pointer value is indirected via the + // '*' operator. + // + // Example: + // var x int + // var y = *x + InvalidIndirection + + /* exprs > [] */ + + // NonIndexableOperand occurs when an index operation is applied to a value + // that cannot be indexed. + // + // Example: + // var x = 1 + // var y = x[1] + NonIndexableOperand + + // InvalidIndex occurs when an index argument is not of integer type, + // negative, or out-of-bounds. + // + // Example: + // var s = [...]int{1,2,3} + // var x = s[5] + // + // Example: + // var s = []int{1,2,3} + // var _ = s[-1] + // + // Example: + // var s = []int{1,2,3} + // var i string + // var _ = s[i] + InvalidIndex + + // SwappedSliceIndices occurs when constant indices in a slice expression + // are decreasing in value. 
+ // + // Example: + // var _ = []int{1,2,3}[2:1] + SwappedSliceIndices + + /* operators > slice */ + + // NonSliceableOperand occurs when a slice operation is applied to a value + // whose type is not sliceable, or is unaddressable. + // + // Example: + // var x = [...]int{1, 2, 3}[:1] + // + // Example: + // var x = 1 + // var y = 1[:1] + NonSliceableOperand + + // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is + // applied to a string. + // + // Example: + // var s = "hello" + // var x = s[1:2:3] + InvalidSliceExpr + + /* exprs > shift */ + + // InvalidShiftCount occurs when the right-hand side of a shift operation is + // either non-integer, negative, or too large. + // + // Example: + // var ( + // x string + // y int = 1 << x + // ) + InvalidShiftCount + + // InvalidShiftOperand occurs when the shifted operand is not an integer. + // + // Example: + // var s = "hello" + // var x = s << 2 + InvalidShiftOperand + + /* exprs > chan */ + + // InvalidReceive occurs when there is a channel receive from a value that + // is either not a channel, or is a send-only channel. + // + // Example: + // func f() { + // var x = 1 + // <-x + // } + InvalidReceive + + // InvalidSend occurs when there is a channel send to a value that is not a + // channel, or is a receive-only channel. + // + // Example: + // func f() { + // var x = 1 + // x <- "hello!" + // } + InvalidSend + + /* exprs > literal */ + + // DuplicateLitKey occurs when an index is duplicated in a slice, array, or + // map literal. + // + // Example: + // var _ = []int{0:1, 0:2} + // + // Example: + // var _ = map[string]int{"a": 1, "a": 2} + DuplicateLitKey + + // MissingLitKey occurs when a map literal is missing a key expression. + // + // Example: + // var _ = map[string]int{1} + MissingLitKey + + // InvalidLitIndex occurs when the key in a key-value element of a slice or + // array literal is not an integer constant. + // + // Example: + // var i = 0 + // var x = []string{i: "world"} + InvalidLitIndex + + // OversizeArrayLit occurs when an array literal exceeds its length. + // + // Example: + // var _ = [2]int{1,2,3} + OversizeArrayLit + + // MixedStructLit occurs when a struct literal contains a mix of positional + // and named elements. + // + // Example: + // var _ = struct{i, j int}{i: 1, 2} + MixedStructLit + + // InvalidStructLit occurs when a positional struct literal has an incorrect + // number of values. + // + // Example: + // var _ = struct{i, j int}{1,2,3} + InvalidStructLit + + // MissingLitField occurs when a struct literal refers to a field that does + // not exist on the struct type. + // + // Example: + // var _ = struct{i int}{j: 2} + MissingLitField + + // DuplicateLitField occurs when a struct literal contains duplicated + // fields. + // + // Example: + // var _ = struct{i int}{i: 1, i: 2} + DuplicateLitField + + // UnexportedLitField occurs when a positional struct literal implicitly + // assigns an unexported field of an imported type. + UnexportedLitField + + // InvalidLitField occurs when a field name is not a valid identifier. + // + // Example: + // var _ = struct{i int}{1: 1} + InvalidLitField + + // UntypedLit occurs when a composite literal omits a required type + // identifier. + // + // Example: + // type outer struct{ + // inner struct { i int } + // } + // + // var _ = outer{inner: {1}} + UntypedLit + + // InvalidLit occurs when a composite literal expression does not match its + // type. 
+ // + // Example: + // type P *struct{ + // x int + // } + // var _ = P {} + InvalidLit + + /* exprs > selector */ + + // AmbiguousSelector occurs when a selector is ambiguous. + // + // Example: + // type E1 struct { i int } + // type E2 struct { i int } + // type T struct { E1; E2 } + // + // var x T + // var _ = x.i + AmbiguousSelector + + // UndeclaredImportedName occurs when a package-qualified identifier is + // undeclared by the imported package. + // + // Example: + // import "go/types" + // + // var _ = types.NotAnActualIdentifier + UndeclaredImportedName + + // UnexportedName occurs when a selector refers to an unexported identifier + // of an imported package. + // + // Example: + // import "reflect" + // + // type _ reflect.flag + UnexportedName + + // UndeclaredName occurs when an identifier is not declared in the current + // scope. + // + // Example: + // var x T + UndeclaredName + + // MissingFieldOrMethod occurs when a selector references a field or method + // that does not exist. + // + // Example: + // type T struct {} + // + // var x = T{}.f + MissingFieldOrMethod + + /* exprs > ... */ + + // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is + // not valid. + // + // Example: + // var _ = map[int][...]int{0: {}} + BadDotDotDotSyntax + + // NonVariadicDotDotDot occurs when a "..." is used on the final argument to + // a non-variadic function. + // + // Example: + // func printArgs(s []string) { + // for _, a := range s { + // println(a) + // } + // } + // + // func f() { + // s := []string{"a", "b", "c"} + // printArgs(s...) + // } + NonVariadicDotDotDot + + // MisplacedDotDotDot occurs when a "..." is used somewhere other than the + // final argument to a function call. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := []int{1,2,3} + // printArgs(0, a...) + // } + MisplacedDotDotDot + + // InvalidDotDotDotOperand occurs when a "..." operator is applied to a + // single-valued operand. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := 1 + // printArgs(a...) + // } + // + // Example: + // func args() (int, int) { + // return 1, 2 + // } + // + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func g() { + // printArgs(args()...) + // } + InvalidDotDotDotOperand + + // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in + // function. + // + // Example: + // var s = []int{1, 2, 3} + // var l = len(s...) + InvalidDotDotDot + + /* exprs > built-in */ + + // UncalledBuiltin occurs when a built-in function is used as a + // function-valued expression, instead of being called. + // + // Per the spec: + // "The built-in functions do not have standard Go types, so they can only + // appear in call expressions; they cannot be used as function values." + // + // Example: + // var _ = copy + UncalledBuiltin + + // InvalidAppend occurs when append is called with a first argument that is + // not a slice. + // + // Example: + // var _ = append(1, 2) + InvalidAppend + + // InvalidCap occurs when an argument to the cap built-in function is not of + // supported type. + // + // See https://golang.org/ref/spec#Length_and_capacity for information on + // which underlying types are supported as arguments to cap and len. 
+ // + // Example: + // var s = 2 + // var x = cap(s) + InvalidCap + + // InvalidClose occurs when close(...) is called with an argument that is + // not of channel type, or that is a receive-only channel. + // + // Example: + // func f() { + // var x int + // close(x) + // } + InvalidClose + + // InvalidCopy occurs when the arguments are not of slice type or do not + // have compatible type. + // + // See https://golang.org/ref/spec#Appending_and_copying_slices for more + // information on the type requirements for the copy built-in. + // + // Example: + // func f() { + // var x []int + // y := []int64{1,2,3} + // copy(x, y) + // } + InvalidCopy + + // InvalidComplex occurs when the complex built-in function is called with + // arguments with incompatible types. + // + // Example: + // var _ = complex(float32(1), float64(2)) + InvalidComplex + + // InvalidDelete occurs when the delete built-in function is called with a + // first argument that is not a map. + // + // Example: + // func f() { + // m := "hello" + // delete(m, "e") + // } + InvalidDelete + + // InvalidImag occurs when the imag built-in function is called with an + // argument that does not have complex type. + // + // Example: + // var _ = imag(int(1)) + InvalidImag + + // InvalidLen occurs when an argument to the len built-in function is not of + // supported type. + // + // See https://golang.org/ref/spec#Length_and_capacity for information on + // which underlying types are supported as arguments to cap and len. + // + // Example: + // var s = 2 + // var x = len(s) + InvalidLen + + // SwappedMakeArgs occurs when make is called with three arguments, and its + // length argument is larger than its capacity argument. + // + // Example: + // var x = make([]int, 3, 2) + SwappedMakeArgs + + // InvalidMake occurs when make is called with an unsupported type argument. + // + // See https://golang.org/ref/spec#Making_slices_maps_and_channels for + // information on the types that may be created using make. + // + // Example: + // var x = make(int) + InvalidMake + + // InvalidReal occurs when the real built-in function is called with an + // argument that does not have complex type. + // + // Example: + // var _ = real(int(1)) + InvalidReal + + /* exprs > assertion */ + + // InvalidAssert occurs when a type assertion is applied to a + // value that is not of interface type. + // + // Example: + // var x = 1 + // var _ = x.(float64) + InvalidAssert + + // ImpossibleAssert occurs for a type assertion x.(T) when the value x of + // interface cannot have dynamic type T, due to a missing or mismatching + // method on T. + // + // Example: + // type T int + // + // func (t *T) m() int { return int(*t) } + // + // type I interface { m() int } + // + // var x I + // var _ = x.(T) + ImpossibleAssert + + /* exprs > conversion */ + + // InvalidConversion occurs when the argument type cannot be converted to the + // target. + // + // See https://golang.org/ref/spec#Conversions for the rules of + // convertibility. + // + // Example: + // var x float64 + // var _ = string(x) + InvalidConversion + + // InvalidUntypedConversion occurs when there is no valid implicit + // conversion from an untyped value satisfying the type constraints of the + // context in which it is used. + // + // Example: + // var _ = 1 + "" + InvalidUntypedConversion + + /* offsetof */ + + // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument + // that is not a selector expression. 
+ // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Offsetof(x) + BadOffsetofSyntax + + // InvalidOffsetof occurs when unsafe.Offsetof is called with a method + // selector, rather than a field selector, or when the field is embedded via + // a pointer. + // + // Per the spec: + // + // "If f is an embedded field, it must be reachable without pointer + // indirections through fields of the struct. " + // + // Example: + // import "unsafe" + // + // type T struct { f int } + // type S struct { *T } + // var s S + // var _ = unsafe.Offsetof(s.f) + // + // Example: + // import "unsafe" + // + // type S struct{} + // + // func (S) m() {} + // + // var s S + // var _ = unsafe.Offsetof(s.m) + InvalidOffsetof + + /* control flow > scope */ + + // UnusedExpr occurs when a side-effect free expression is used as a + // statement. Such a statement has no effect. + // + // Example: + // func f(i int) { + // i*i + // } + UnusedExpr + + // UnusedVar occurs when a variable is declared but unused. + // + // Example: + // func f() { + // x := 1 + // } + UnusedVar + + // MissingReturn occurs when a function with results is missing a return + // statement. + // + // Example: + // func f() int {} + MissingReturn + + // WrongResultCount occurs when a return statement returns an incorrect + // number of values. + // + // Example: + // func ReturnOne() int { + // return 1, 2 + // } + WrongResultCount + + // OutOfScopeResult occurs when the name of a value implicitly returned by + // an empty return statement is shadowed in a nested scope. + // + // Example: + // func factor(n int) (i int) { + // for i := 2; i < n; i++ { + // if n%i == 0 { + // return + // } + // } + // return 0 + // } + OutOfScopeResult + + /* control flow > if */ + + // InvalidCond occurs when an if condition is not a boolean expression. + // + // Example: + // func checkReturn(i int) { + // if i { + // panic("non-zero return") + // } + // } + InvalidCond + + /* control flow > for */ + + // InvalidPostDecl occurs when there is a declaration in a for-loop post + // statement. + // + // Example: + // func f() { + // for i := 0; i < 10; j := 0 {} + // } + InvalidPostDecl + + // InvalidChanRange occurs when a send-only channel used in a range + // expression. + // + // Example: + // func sum(c chan<- int) { + // s := 0 + // for i := range c { + // s += i + // } + // } + InvalidChanRange + + // InvalidIterVar occurs when two iteration variables are used while ranging + // over a channel. + // + // Example: + // func f(c chan int) { + // for k, v := range c { + // println(k, v) + // } + // } + InvalidIterVar + + // InvalidRangeExpr occurs when the type of a range expression is not array, + // slice, string, map, or channel. + // + // Example: + // func f(i int) { + // for j := range i { + // println(j) + // } + // } + InvalidRangeExpr + + /* control flow > switch */ + + // MisplacedBreak occurs when a break statement is not within a for, switch, + // or select statement of the innermost function definition. + // + // Example: + // func f() { + // break + // } + MisplacedBreak + + // MisplacedContinue occurs when a continue statement is not within a for + // loop of the innermost function definition. 
+ // + // Example: + // func sumeven(n int) int { + // proceed := func() { + // continue + // } + // sum := 0 + // for i := 1; i <= n; i++ { + // if i % 2 != 0 { + // proceed() + // } + // sum += i + // } + // return sum + // } + MisplacedContinue + + // MisplacedFallthrough occurs when a fallthrough statement is not within an + // expression switch. + // + // Example: + // func typename(i interface{}) string { + // switch i.(type) { + // case int64: + // fallthrough + // case int: + // return "int" + // } + // return "unsupported" + // } + MisplacedFallthrough + + // DuplicateCase occurs when a type or expression switch has duplicate + // cases. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // case 1: + // println("One") + // } + // } + DuplicateCase + + // DuplicateDefault occurs when a type or expression switch has multiple + // default clauses. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // default: + // println("One") + // default: + // println("1") + // } + // } + DuplicateDefault + + // BadTypeKeyword occurs when a .(type) expression is used anywhere other + // than a type switch. + // + // Example: + // type I interface { + // m() + // } + // var t I + // var _ = t.(type) + BadTypeKeyword + + // InvalidTypeSwitch occurs when .(type) is used on an expression that is + // not of interface type. + // + // Example: + // func f(i int) { + // switch x := i.(type) {} + // } + InvalidTypeSwitch + + // InvalidExprSwitch occurs when a switch expression is not comparable. + // + // Example: + // func _() { + // var a struct{ _ func() } + // switch a /* ERROR cannot switch on a */ { + // } + // } + InvalidExprSwitch + + /* control flow > select */ + + // InvalidSelectCase occurs when a select case is not a channel send or + // receive. + // + // Example: + // func checkChan(c <-chan int) bool { + // select { + // case c: + // return true + // default: + // return false + // } + // } + InvalidSelectCase + + /* control flow > labels and jumps */ + + // UndeclaredLabel occurs when an undeclared label is jumped to. + // + // Example: + // func f() { + // goto L + // } + UndeclaredLabel + + // DuplicateLabel occurs when a label is declared more than once. + // + // Example: + // func f() int { + // L: + // L: + // return 1 + // } + DuplicateLabel + + // MisplacedLabel occurs when a break or continue label is not on a for, + // switch, or select statement. + // + // Example: + // func f() { + // L: + // a := []int{1,2,3} + // for _, e := range a { + // if e > 10 { + // break L + // } + // println(a) + // } + // } + MisplacedLabel + + // UnusedLabel occurs when a label is declared but not used. + // + // Example: + // func f() { + // L: + // } + UnusedLabel + + // JumpOverDecl occurs when a label jumps over a variable declaration. + // + // Example: + // func f() int { + // goto L + // x := 2 + // L: + // x++ + // return x + // } + JumpOverDecl + + // JumpIntoBlock occurs when a forward jump goes to a label inside a nested + // block. + // + // Example: + // func f(x int) { + // goto L + // if x > 0 { + // L: + // print("inside block") + // } + // } + JumpIntoBlock + + /* control flow > calls */ + + // InvalidMethodExpr occurs when a pointer method is called but the argument + // is not addressable. 
+	//
+	// Example:
+	//  type T struct {}
+	//
+	//  func (*T) m() int { return 1 }
+	//
+	//  var _ = T.m(T{})
+	InvalidMethodExpr
+
+	// WrongArgCount occurs when too few or too many arguments are passed in a
+	// function call.
+	//
+	// Example:
+	//  func f(i int) {}
+	//  var x = f()
+	WrongArgCount
+
+	// InvalidCall occurs when an expression is called that is not of function
+	// type.
+	//
+	// Example:
+	//  var x = "x"
+	//  var y = x()
+	InvalidCall
+
+	/* control flow > suspended */
+
+	// UnusedResults occurs when a restricted expression-only built-in function
+	// is suspended via go or defer. Such a suspension discards the results of
+	// these side-effect free built-in functions, and therefore is ineffectual.
+	//
+	// Example:
+	//  func f(a []int) int {
+	//  	defer len(a)
+	//  	return 0
+	//  }
+	UnusedResults
+
+	// InvalidDefer occurs when a deferred expression is not a function call,
+	// for example if the expression is a type conversion.
+	//
+	// Example:
+	//  func f(i int) int {
+	//  	defer int32(i)
+	//  	return i
+	//  }
+	InvalidDefer
+
+	// InvalidGo occurs when a go expression is not a function call, for example
+	// if the expression is a type conversion.
+	//
+	// Example:
+	//  func f(i int) int {
+	//  	go int32(i)
+	//  	return i
+	//  }
+	InvalidGo
+
+	// All codes below were added in Go 1.17.
+
+	/* decl */
+
+	// BadDecl occurs when a declaration has invalid syntax.
+	BadDecl
+
+	// RepeatedDecl occurs when an identifier occurs more than once on the left
+	// hand side of a short variable declaration.
+	//
+	// Example:
+	//  func _() {
+	//  	x, y, y := 1, 2, 3
+	//  }
+	RepeatedDecl
+
+	/* unsafe */
+
+	// InvalidUnsafeAdd occurs when unsafe.Add is called with a
+	// length argument that is not of integer type.
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  var p unsafe.Pointer
+	//  var _ = unsafe.Add(p, float64(1))
+	InvalidUnsafeAdd
+
+	// InvalidUnsafeSlice occurs when unsafe.Slice is called with a
+	// pointer argument that is not of pointer type or a length argument
+	// that is not of integer type, negative, or out of bounds.
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  var x int
+	//  var _ = unsafe.Slice(x, 1)
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  var x int
+	//  var _ = unsafe.Slice(&x, float64(1))
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  var x int
+	//  var _ = unsafe.Slice(&x, -1)
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  var x int
+	//  var _ = unsafe.Slice(&x, uint64(1) << 63)
+	InvalidUnsafeSlice
+
+	// All codes below were added in Go 1.18.
+
+	/* features */
+
+	// UnsupportedFeature occurs when a language feature is used that is not
+	// supported at this Go version.
+	UnsupportedFeature
+
+	/* type params */
+
+	// NotAGenericType occurs when a non-generic type is used where a generic
+	// type is expected: in type or function instantiation.
+	//
+	// Example:
+	//  type T int
+	//
+	//  var _ T[int]
+	NotAGenericType
+
+	// WrongTypeArgCount occurs when a type or function is instantiated with an
+	// incorrect number of type arguments, including when a generic type or
+	// function is used without instantiation.
+	//
+	// Errors involving failed type inference are assigned other error codes.
+	//
+	// Example:
+	//  type T[p any] int
+	//
+	//  var _ T[int, string]
+	//
+	// Example:
+	//  func f[T any]() {}
+	//
+	//  var x = f
+	WrongTypeArgCount
+
+	// CannotInferTypeArgs occurs when type or function type argument inference
+	// fails to infer all type arguments.
+ // + // Example: + // func f[T any]() {} + // + // func _() { + // f() + // } + // + // Example: + // type N[P, Q any] struct{} + // + // var _ N[int] + CannotInferTypeArgs + + // InvalidTypeArg occurs when a type argument does not satisfy its + // corresponding type parameter constraints. + // + // Example: + // type T[P ~int] struct{} + // + // var _ T[string] + InvalidTypeArg // arguments? InferenceFailed + + // InvalidInstanceCycle occurs when an invalid cycle is detected + // within the instantiation graph. + // + // Example: + // func f[T any]() { f[*T]() } + InvalidInstanceCycle + + // InvalidUnion occurs when an embedded union or approximation element is + // not valid. + // + // Example: + // type _ interface { + // ~int | interface{ m() } + // } + InvalidUnion + + // MisplacedConstraintIface occurs when a constraint-type interface is used + // outside of constraint position. + // + // Example: + // type I interface { ~int } + // + // var _ I + MisplacedConstraintIface + + // InvalidMethodTypeParams occurs when methods have type parameters. + // + // It cannot be encountered with an AST parsed using go/parser. + InvalidMethodTypeParams + + // MisplacedTypeParam occurs when a type parameter is used in a place where + // it is not permitted. + // + // Example: + // type T[P any] P + // + // Example: + // type T[P any] struct{ *P } + MisplacedTypeParam + + // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with + // an argument that is not of slice type. It also occurs if it is used + // in a package compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.SliceData(x) + InvalidUnsafeSliceData + + // InvalidUnsafeString occurs when unsafe.String is called with + // a length argument that is not of integer type, negative, or + // out of bounds. It also occurs if it is used in a package + // compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var b [10]byte + // var _ = unsafe.String(&b[0], -1) + InvalidUnsafeString + + // InvalidUnsafeStringData occurs if it is used in a package + // compiled for a language version before go1.20. + _ // not used anymore + +) diff --git a/openshift/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/openshift/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go new file mode 100644 index 0000000000..15ecf7c5de --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go @@ -0,0 +1,179 @@ +// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT. + +package typesinternal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidSyntaxTree - -1] + _ = x[Test-1] + _ = x[BlankPkgName-2] + _ = x[MismatchedPkgName-3] + _ = x[InvalidPkgUse-4] + _ = x[BadImportPath-5] + _ = x[BrokenImport-6] + _ = x[ImportCRenamed-7] + _ = x[UnusedImport-8] + _ = x[InvalidInitCycle-9] + _ = x[DuplicateDecl-10] + _ = x[InvalidDeclCycle-11] + _ = x[InvalidTypeCycle-12] + _ = x[InvalidConstInit-13] + _ = x[InvalidConstVal-14] + _ = x[InvalidConstType-15] + _ = x[UntypedNilUse-16] + _ = x[WrongAssignCount-17] + _ = x[UnassignableOperand-18] + _ = x[NoNewVar-19] + _ = x[MultiValAssignOp-20] + _ = x[InvalidIfaceAssign-21] + _ = x[InvalidChanAssign-22] + _ = x[IncompatibleAssign-23] + _ = x[UnaddressableFieldAssign-24] + _ = x[NotAType-25] + _ = x[InvalidArrayLen-26] + _ = x[BlankIfaceMethod-27] + _ = x[IncomparableMapKey-28] + _ = x[InvalidIfaceEmbed-29] + _ = x[InvalidPtrEmbed-30] + _ = x[BadRecv-31] + _ = x[InvalidRecv-32] + _ = x[DuplicateFieldAndMethod-33] + _ = x[DuplicateMethod-34] + _ = x[InvalidBlank-35] + _ = x[InvalidIota-36] + _ = x[MissingInitBody-37] + _ = x[InvalidInitSig-38] + _ = x[InvalidInitDecl-39] + _ = x[InvalidMainDecl-40] + _ = x[TooManyValues-41] + _ = x[NotAnExpr-42] + _ = x[TruncatedFloat-43] + _ = x[NumericOverflow-44] + _ = x[UndefinedOp-45] + _ = x[MismatchedTypes-46] + _ = x[DivByZero-47] + _ = x[NonNumericIncDec-48] + _ = x[UnaddressableOperand-49] + _ = x[InvalidIndirection-50] + _ = x[NonIndexableOperand-51] + _ = x[InvalidIndex-52] + _ = x[SwappedSliceIndices-53] + _ = x[NonSliceableOperand-54] + _ = x[InvalidSliceExpr-55] + _ = x[InvalidShiftCount-56] + _ = x[InvalidShiftOperand-57] + _ = x[InvalidReceive-58] + _ = x[InvalidSend-59] + _ = x[DuplicateLitKey-60] + _ = x[MissingLitKey-61] + _ = x[InvalidLitIndex-62] + _ = x[OversizeArrayLit-63] + _ = x[MixedStructLit-64] + _ = x[InvalidStructLit-65] + _ = x[MissingLitField-66] + _ = x[DuplicateLitField-67] + _ = x[UnexportedLitField-68] + _ = x[InvalidLitField-69] + _ = x[UntypedLit-70] + _ = x[InvalidLit-71] + _ = x[AmbiguousSelector-72] + _ = x[UndeclaredImportedName-73] + _ = x[UnexportedName-74] + _ = x[UndeclaredName-75] + _ = x[MissingFieldOrMethod-76] + _ = x[BadDotDotDotSyntax-77] + _ = x[NonVariadicDotDotDot-78] + _ = x[MisplacedDotDotDot-79] + _ = x[InvalidDotDotDotOperand-80] + _ = x[InvalidDotDotDot-81] + _ = x[UncalledBuiltin-82] + _ = x[InvalidAppend-83] + _ = x[InvalidCap-84] + _ = x[InvalidClose-85] + _ = x[InvalidCopy-86] + _ = x[InvalidComplex-87] + _ = x[InvalidDelete-88] + _ = x[InvalidImag-89] + _ = x[InvalidLen-90] + _ = x[SwappedMakeArgs-91] + _ = x[InvalidMake-92] + _ = x[InvalidReal-93] + _ = x[InvalidAssert-94] + _ = x[ImpossibleAssert-95] + _ = x[InvalidConversion-96] + _ = x[InvalidUntypedConversion-97] + _ = x[BadOffsetofSyntax-98] + _ = x[InvalidOffsetof-99] + _ = x[UnusedExpr-100] + _ = x[UnusedVar-101] + _ = x[MissingReturn-102] + _ = x[WrongResultCount-103] + _ = x[OutOfScopeResult-104] + _ = x[InvalidCond-105] + _ = x[InvalidPostDecl-106] + _ = x[InvalidChanRange-107] + _ = x[InvalidIterVar-108] + _ = x[InvalidRangeExpr-109] + _ = x[MisplacedBreak-110] + _ = x[MisplacedContinue-111] + _ = x[MisplacedFallthrough-112] + _ = x[DuplicateCase-113] + _ = x[DuplicateDefault-114] + _ = x[BadTypeKeyword-115] + _ = x[InvalidTypeSwitch-116] + _ = x[InvalidExprSwitch-117] + _ = x[InvalidSelectCase-118] + _ = x[UndeclaredLabel-119] + _ = x[DuplicateLabel-120] + _ = x[MisplacedLabel-121] + _ = x[UnusedLabel-122] + _ = x[JumpOverDecl-123] + _ = x[JumpIntoBlock-124] + _ = x[InvalidMethodExpr-125] + _ = 
x[WrongArgCount-126] + _ = x[InvalidCall-127] + _ = x[UnusedResults-128] + _ = x[InvalidDefer-129] + _ = x[InvalidGo-130] + _ = x[BadDecl-131] + _ = x[RepeatedDecl-132] + _ = x[InvalidUnsafeAdd-133] + _ = x[InvalidUnsafeSlice-134] + _ = x[UnsupportedFeature-135] + _ = x[NotAGenericType-136] + _ = x[WrongTypeArgCount-137] + _ = x[CannotInferTypeArgs-138] + _ = x[InvalidTypeArg-139] + _ = x[InvalidInstanceCycle-140] + _ = x[InvalidUnion-141] + _ = x[MisplacedConstraintIface-142] + _ = x[InvalidMethodTypeParams-143] + _ = x[MisplacedTypeParam-144] + _ = x[InvalidUnsafeSliceData-145] + _ = x[InvalidUnsafeString-146] +} + +const ( + _ErrorCode_name_0 = "InvalidSyntaxTree" + _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString" +) + +var ( + _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 
1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180} +) + +func (i ErrorCode) String() string { + switch { + case i == -1: + return _ErrorCode_name_0 + case 1 <= i && i <= 146: + i -= 1 + return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]] + default: + return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/openshift/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/openshift/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go new file mode 100644 index 0000000000..b64f714eb3 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go @@ -0,0 +1,46 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/ast" + "go/types" + "strconv" +) + +// FileQualifier returns a [types.Qualifier] function that qualifies +// imported symbols appropriately based on the import environment of a given +// file. +// If the same package is imported multiple times, the last appearance is +// recorded. +func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier { + // Construct mapping of import paths to their defined names. + // It is only necessary to look at renaming imports. + imports := make(map[string]string) + for _, imp := range f.Imports { + if imp.Name != nil && imp.Name.Name != "_" { + path, _ := strconv.Unquote(imp.Path.Value) + imports[path] = imp.Name.Name + } + } + + // Define qualifier to replace full package paths with names of the imports. + return func(p *types.Package) string { + if p == nil || p == pkg { + return "" + } + + if name, ok := imports[p.Path()]; ok { + if name == "." { + return "" + } else { + return name + } + } + + // If there is no local renaming, fall back to the package name. + return p.Name() + } +} diff --git a/openshift/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/openshift/vendor/golang.org/x/tools/internal/typesinternal/recv.go new file mode 100644 index 0000000000..8352ea7617 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -0,0 +1,44 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" +) + +// ReceiverNamed returns the named type (if any) associated with the +// type of recv, which may be of the form N or *N, or aliases thereof. +// It also reports whether a Pointer was present. +// +// The named result may be nil if recv is from a method on an +// anonymous interface or struct types or in ill-typed code. +func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { + t := recv.Type() + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { + isPtr = true + t = ptr.Elem() + } + named, _ = types.Unalias(t).(*types.Named) + return +} + +// Unpointer returns T given *T or an alias thereof. +// For all other types it is the identity function. +// It does not look at underlying types. +// The result may be an alias. +// +// Use this function to strip off the optional pointer on a receiver +// in a field or method selection, without losing the named type +// (which is needed to compute the method set). 
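+//
+// A hedged illustration (the names here are hypothetical): for a selection
+// p.x where p has type *Point and Point is a named struct type, Unpointer
+// yields the named type Point rather than its underlying struct, so Point's
+// method set remains available.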
+// +// See also [typeparams.MustDeref], which removes one level of +// indirection from the type, regardless of named types (analogous to +// a LOAD instruction). +func Unpointer(t types.Type) types.Type { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} diff --git a/openshift/vendor/golang.org/x/tools/internal/typesinternal/toonew.go b/openshift/vendor/golang.org/x/tools/internal/typesinternal/toonew.go new file mode 100644 index 0000000000..cc86487eaa --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/typesinternal/toonew.go @@ -0,0 +1,89 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + + "golang.org/x/tools/internal/stdlib" + "golang.org/x/tools/internal/versions" +) + +// TooNewStdSymbols computes the set of package-level symbols +// exported by pkg that are not available at the specified version. +// The result maps each symbol to its minimum version. +// +// The pkg is allowed to contain type errors. +func TooNewStdSymbols(pkg *types.Package, version string) map[types.Object]string { + disallowed := make(map[types.Object]string) + + // Pass 1: package-level symbols. + symbols := stdlib.PackageSymbols[pkg.Path()] + for _, sym := range symbols { + symver := sym.Version.String() + if versions.Before(version, symver) { + switch sym.Kind { + case stdlib.Func, stdlib.Var, stdlib.Const, stdlib.Type: + disallowed[pkg.Scope().Lookup(sym.Name)] = symver + } + } + } + + // Pass 2: fields and methods. + // + // We allow fields and methods if their associated type is + // disallowed, as otherwise we would report false positives + // for compatibility shims. Consider: + // + // //go:build go1.22 + // type T struct { F std.Real } // correct new API + // + // //go:build !go1.22 + // type T struct { F fake } // shim + // type fake struct { ... } + // func (fake) M () {} + // + // These alternative declarations of T use either the std.Real + // type, introduced in go1.22, or a fake type, for the field + // F. (The fakery could be arbitrarily deep, involving more + // nested fields and methods than are shown here.) Clients + // that use the compatibility shim T will compile with any + // version of go, whether older or newer than go1.22, but only + // the newer version will use the std.Real implementation. + // + // Now consider a reference to method M in new(T).F.M() in a + // module that requires a minimum of go1.21. The analysis may + // occur using a version of Go higher than 1.21, selecting the + // first version of T, so the method M is Real.M. This would + // spuriously cause the analyzer to report a reference to a + // too-new symbol even though this expression compiles just + // fine (with the fake implementation) using go1.21. 
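+	//
+	// Concretely, the loop below reports a field or method only when its
+	// enclosing type is itself allowed at the requested version; symbols
+	// reached through a disallowed (possibly shimmed) parent type are
+	// skipped.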
+ for _, sym := range symbols { + symVersion := sym.Version.String() + if !versions.Before(version, symVersion) { + continue // allowed + } + + var obj types.Object + switch sym.Kind { + case stdlib.Field: + typename, name := sym.SplitField() + if t := pkg.Scope().Lookup(typename); t != nil && disallowed[t] == "" { + obj, _, _ = types.LookupFieldOrMethod(t.Type(), false, pkg, name) + } + + case stdlib.Method: + ptr, recvname, name := sym.SplitMethod() + if t := pkg.Scope().Lookup(recvname); t != nil && disallowed[t] == "" { + obj, _, _ = types.LookupFieldOrMethod(t.Type(), ptr, pkg, name) + } + } + if obj != nil { + disallowed[obj] = symVersion + } + } + + return disallowed +} diff --git a/openshift/vendor/golang.org/x/tools/internal/typesinternal/types.go b/openshift/vendor/golang.org/x/tools/internal/typesinternal/types.go new file mode 100644 index 0000000000..a5cd7e8dbf --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -0,0 +1,155 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typesinternal provides access to internal go/types APIs that are not +// yet exported. +package typesinternal + +import ( + "go/ast" + "go/token" + "go/types" + "reflect" + "unsafe" + + "golang.org/x/tools/internal/aliases" +) + +func SetUsesCgo(conf *types.Config) bool { + v := reflect.ValueOf(conf).Elem() + + f := v.FieldByName("go115UsesCgo") + if !f.IsValid() { + f = v.FieldByName("UsesCgo") + if !f.IsValid() { + return false + } + } + + addr := unsafe.Pointer(f.UnsafeAddr()) + *(*bool)(addr) = true + + return true +} + +// ErrorCodeStartEnd extracts additional information from types.Error values +// generated by Go version 1.16 and later: the error code, start position, and +// end position. If all positions are valid, start <= err.Pos <= end. +// +// If the data could not be read, the final result parameter will be false. +// +// TODO(adonovan): eliminate start/end when proposal #71803 is accepted. +func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { + var data [3]int + // By coincidence all of these fields are ints, which simplifies things. + v := reflect.ValueOf(err) + for i, name := range []string{"go116code", "go116start", "go116end"} { + f := v.FieldByName(name) + if !f.IsValid() { + return 0, 0, 0, false + } + data[i] = int(f.Int()) + } + return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true +} + +// NameRelativeTo returns a types.Qualifier that qualifies members of +// all packages other than pkg, using only the package name. +// (By contrast, [types.RelativeTo] uses the complete package path, +// which is often excessive.) +// +// If pkg is nil, it is equivalent to [*types.Package.Name]. +func NameRelativeTo(pkg *types.Package) types.Qualifier { + return func(other *types.Package) string { + if pkg != nil && pkg == other { + return "" // same package; unqualified + } + return other.Name() + } +} + +// TypeNameFor returns the type name symbol for the specified type, if +// it is a [*types.Alias], [*types.Named], [*types.TypeParam], or a +// [*types.Basic] representing a type. +// +// For all other types, and for Basic types representing a builtin, +// constant, or nil, it returns nil. Be careful not to convert the +// resulting nil pointer to a [types.Object]! +// +// If t is the type of a constant, it may be an "untyped" type, which +// has no TypeName. 
To access the name of such types (e.g. "untyped +// int"), use [types.Basic.Name]. +func TypeNameFor(t types.Type) *types.TypeName { + switch t := t.(type) { + case *types.Alias: + return t.Obj() + case *types.Named: + return t.Obj() + case *types.TypeParam: + return t.Obj() + case *types.Basic: + // See issues #71886 and #66890 for some history. + if tname, ok := types.Universe.Lookup(t.Name()).(*types.TypeName); ok { + return tname + } + } + return nil +} + +// A NamedOrAlias is a [types.Type] that is named (as +// defined by the spec) and capable of bearing type parameters: it +// abstracts aliases ([types.Alias]) and defined types +// ([types.Named]). +// +// Every type declared by an explicit "type" declaration is a +// NamedOrAlias. (Built-in type symbols may additionally +// have type [types.Basic], which is not a NamedOrAlias, +// though the spec regards them as "named"; see [TypeNameFor].) +// +// NamedOrAlias cannot expose the Origin method, because +// [types.Alias.Origin] and [types.Named.Origin] have different +// (covariant) result types; use [Origin] instead. +type NamedOrAlias interface { + types.Type + Obj() *types.TypeName + TypeArgs() *types.TypeList + TypeParams() *types.TypeParamList + SetTypeParams(tparams []*types.TypeParam) +} + +var ( + _ NamedOrAlias = (*types.Alias)(nil) + _ NamedOrAlias = (*types.Named)(nil) +) + +// Origin returns the generic type of the Named or Alias type t if it +// is instantiated, otherwise it returns t. +func Origin(t NamedOrAlias) NamedOrAlias { + switch t := t.(type) { + case *types.Alias: + return aliases.Origin(t) + case *types.Named: + return t.Origin() + } + return t +} + +// IsPackageLevel reports whether obj is a package-level symbol. +func IsPackageLevel(obj types.Object) bool { + return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope() +} + +// NewTypesInfo returns a *types.Info with all maps populated. +func NewTypesInfo() *types.Info { + return &types.Info{ + Types: map[ast.Expr]types.TypeAndValue{}, + Instances: map[*ast.Ident]types.Instance{}, + Defs: map[*ast.Ident]types.Object{}, + Uses: map[*ast.Ident]types.Object{}, + Implicits: map[ast.Node]types.Object{}, + Selections: map[*ast.SelectorExpr]*types.Selection{}, + Scopes: map[ast.Node]*types.Scope{}, + FileVersions: map[*ast.File]string{}, + } +} diff --git a/openshift/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/openshift/vendor/golang.org/x/tools/internal/typesinternal/varkind.go new file mode 100644 index 0000000000..e5da049511 --- /dev/null +++ b/openshift/vendor/golang.org/x/tools/internal/typesinternal/varkind.go @@ -0,0 +1,40 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +// TODO(adonovan): when CL 645115 lands, define the go1.25 version of +// this API that actually does something. + +import "go/types" + +type VarKind uint8 + +const ( + _ VarKind = iota // (not meaningful) + PackageVar // a package-level variable + LocalVar // a local variable + RecvVar // a method receiver variable + ParamVar // a function parameter variable + ResultVar // a function result variable + FieldVar // a struct field +) + +func (kind VarKind) String() string { + return [...]string{ + 0: "VarKind(0)", + PackageVar: "PackageVar", + LocalVar: "LocalVar", + RecvVar: "RecvVar", + ParamVar: "ParamVar", + ResultVar: "ResultVar", + FieldVar: "FieldVar", + }[kind] +} + +// GetVarKind returns an invalid VarKind. 
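+// It is a stub until the go1.25 go/types API referenced in the TODO above
+// (CL 645115) lands; callers must not rely on its result.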
+func GetVarKind(v *types.Var) VarKind { return 0 }
+
+// SetVarKind has no effect.
+func SetVarKind(v *types.Var, kind VarKind) {}
diff --git a/openshift/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/openshift/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
new file mode 100644
index 0000000000..d272949c17
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -0,0 +1,392 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"strings"
+)
+
+// ZeroString returns the string representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroString may return a partially correct
+// string representation. The caller should use the returned isValid boolean
+// to determine the validity of the expression.
+//
+// When assigning to a wider type (such as 'any'), it's the caller's
+// responsibility to handle any necessary type conversions.
+//
+// This string can be used on the right-hand side of an assignment where the
+// left-hand side has that explicit type.
+// References to named types are qualified by an appropriate (optional)
+// qualifier function.
+// Exception: This does not apply to tuples. Their string representation is
+// informational only and cannot be used in an assignment.
+//
+// See [ZeroExpr] for a variant that returns an [ast.Expr].
+func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) {
+	switch t := t.(type) {
+	case *types.Basic:
+		switch {
+		case t.Info()&types.IsBoolean != 0:
+			return "false", true
+		case t.Info()&types.IsNumeric != 0:
+			return "0", true
+		case t.Info()&types.IsString != 0:
+			return `""`, true
+		case t.Kind() == types.UnsafePointer:
+			fallthrough
+		case t.Kind() == types.UntypedNil:
+			return "nil", true
+		case t.Kind() == types.Invalid:
+			return "invalid", false
+		default:
+			panic(fmt.Sprintf("ZeroString for unexpected type %v", t))
+		}
+
+	case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+		return "nil", true
+
+	case *types.Interface:
+		if !t.IsMethodSet() {
+			return "invalid", false
+		}
+		return "nil", true
+
+	case *types.Named:
+		switch under := t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return types.TypeString(t, qual) + "{}", true
+		default:
+			return ZeroString(under, qual)
+		}
+
+	case *types.Alias:
+		switch t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return types.TypeString(t, qual) + "{}", true
+		default:
+			// A type parameter can have an alias, but an alias type's
+			// underlying type can never be a type parameter.
+			// Use types.Unalias to preserve the type parameter information,
+			// instead of calling Underlying(), which would go right through
+			// the type parameter to its underlying type (always an interface).
+			return ZeroString(types.Unalias(t), qual)
+		}
+
+	case *types.Array, *types.Struct:
+		return types.TypeString(t, qual) + "{}", true
+
+	case *types.TypeParam:
+		// Assumes func new is not shadowed.
+		return "*new(" + types.TypeString(t, qual) + ")", true
+
+	case *types.Tuple:
+		// Tuples are not normal values.
+		// We currently format them as "(t[0], ..., t[n])". Could be something else.
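+		// For instance, under this convention the zero of an (int, error)
+		// result pair renders as "(0, nil)".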
+		isValid := true
+		components := make([]string, t.Len())
+		for i := 0; i < t.Len(); i++ {
+			comp, ok := ZeroString(t.At(i).Type(), qual)
+
+			components[i] = comp
+			isValid = isValid && ok
+		}
+		return "(" + strings.Join(components, ", ") + ")", isValid
+
+	case *types.Union:
+		// Variables of these types cannot be created, so it makes
+		// no sense to ask for their zero value.
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	default:
+		panic(t) // unreachable.
+	}
+}
+
+// ZeroExpr returns the ast.Expr representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr
+// representation. The caller should use the returned isValid boolean to determine
+// the validity of the expression.
+//
+// This function is designed for types suitable for variables and should not be
+// used with Tuple or Union types. References to named types are qualified by an
+// appropriate (optional) qualifier function.
+//
+// See [ZeroString] for a variant that returns a string.
+func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
+	switch t := t.(type) {
+	case *types.Basic:
+		switch {
+		case t.Info()&types.IsBoolean != 0:
+			return &ast.Ident{Name: "false"}, true
+		case t.Info()&types.IsNumeric != 0:
+			return &ast.BasicLit{Kind: token.INT, Value: "0"}, true
+		case t.Info()&types.IsString != 0:
+			return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true
+		case t.Kind() == types.UnsafePointer:
+			fallthrough
+		case t.Kind() == types.UntypedNil:
+			return ast.NewIdent("nil"), true
+		case t.Kind() == types.Invalid:
+			return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+		default:
+			panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t))
+		}
+
+	case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+		return ast.NewIdent("nil"), true
+
+	case *types.Interface:
+		if !t.IsMethodSet() {
+			return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+		}
+		return ast.NewIdent("nil"), true
+
+	case *types.Named:
+		switch under := t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return &ast.CompositeLit{
+				Type: TypeExpr(t, qual),
+			}, true
+		default:
+			return ZeroExpr(under, qual)
+		}
+
+	case *types.Alias:
+		switch t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return &ast.CompositeLit{
+				Type: TypeExpr(t, qual),
+			}, true
+		default:
+			return ZeroExpr(types.Unalias(t), qual)
+		}
+
+	case *types.Array, *types.Struct:
+		return &ast.CompositeLit{
+			Type: TypeExpr(t, qual),
+		}, true
+
+	case *types.TypeParam:
+		return &ast.StarExpr{ // *new(T)
+			X: &ast.CallExpr{
+				// Assumes func new is not shadowed.
+				Fun: ast.NewIdent("new"),
+				Args: []ast.Expr{
+					ast.NewIdent(t.Obj().Name()),
+				},
+			},
+		}, true
+
+	case *types.Tuple:
+		// Unlike ZeroString, there is no ast.Expr that can express a tuple as
+		// "(t[0], ..., t[n])".
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	case *types.Union:
+		// Variables of these types cannot be created, so it makes
+		// no sense to ask for their zero value.
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	default:
+		panic(t) // unreachable.
+	}
+}
+
+// IsZeroExpr uses simple syntactic heuristics to report whether expr
+// is an obvious zero value, such as 0, "", nil, or false.
+// It cannot do better without type information.
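+// For example, it reports true for the literals 0 and "" and the identifiers
+// nil and false, but false for 0.0, 0x0, or a named constant that happens to
+// equal zero.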
+func IsZeroExpr(expr ast.Expr) bool {
+	switch e := expr.(type) {
+	case *ast.BasicLit:
+		return e.Value == "0" || e.Value == `""`
+	case *ast.Ident:
+		return e.Name == "nil" || e.Name == "false"
+	default:
+		return false
+	}
+}
+
+// TypeExpr returns syntax for the specified type. References to named types
+// are qualified by an appropriate (optional) qualifier function.
+// It may panic for types such as Tuple or Union.
+func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
+	switch t := t.(type) {
+	case *types.Basic:
+		switch t.Kind() {
+		case types.UnsafePointer:
+			return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")}
+		default:
+			return ast.NewIdent(t.Name())
+		}
+
+	case *types.Pointer:
+		return &ast.UnaryExpr{
+			Op: token.MUL,
+			X:  TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Array:
+		return &ast.ArrayType{
+			Len: &ast.BasicLit{
+				Kind:  token.INT,
+				Value: fmt.Sprintf("%d", t.Len()),
+			},
+			Elt: TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Slice:
+		return &ast.ArrayType{
+			Elt: TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Map:
+		return &ast.MapType{
+			Key:   TypeExpr(t.Key(), qual),
+			Value: TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Chan:
+		dir := ast.ChanDir(t.Dir())
+		if t.Dir() == types.SendRecv {
+			dir = ast.SEND | ast.RECV
+		}
+		return &ast.ChanType{
+			Dir:   dir,
+			Value: TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Signature:
+		var params []*ast.Field
+		for i := 0; i < t.Params().Len(); i++ {
+			params = append(params, &ast.Field{
+				Type: TypeExpr(t.Params().At(i).Type(), qual),
+				Names: []*ast.Ident{
+					{
+						Name: t.Params().At(i).Name(),
+					},
+				},
+			})
+		}
+		if t.Variadic() {
+			last := params[len(params)-1]
+			last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
+		}
+		var returns []*ast.Field
+		for i := 0; i < t.Results().Len(); i++ {
+			returns = append(returns, &ast.Field{
+				Type: TypeExpr(t.Results().At(i).Type(), qual),
+			})
+		}
+		return &ast.FuncType{
+			Params: &ast.FieldList{
+				List: params,
+			},
+			Results: &ast.FieldList{
+				List: returns,
+			},
+		}
+
+	case *types.TypeParam:
+		pkgName := qual(t.Obj().Pkg())
+		if pkgName == "" || t.Obj().Pkg() == nil {
+			return ast.NewIdent(t.Obj().Name())
+		}
+		return &ast.SelectorExpr{
+			X:   ast.NewIdent(pkgName),
+			Sel: ast.NewIdent(t.Obj().Name()),
+		}
+
+	// types.TypeParam also implements the NamedOrAlias interface, so the
+	// TypeParam case above must come before the NamedOrAlias case.
+	// TODO(hxjiang): remove this comment once TypeArgs() is added to interface
+	// NamedOrAlias.
+	case NamedOrAlias:
+		var expr ast.Expr = ast.NewIdent(t.Obj().Name())
+		if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" {
+			expr = &ast.SelectorExpr{
+				X:   ast.NewIdent(pkgName),
+				Sel: expr.(*ast.Ident),
+			}
+		}
+
+		// TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to
+		// typesinternal.NamedOrAlias.
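+		// Until then, the interface assertion below recovers any type
+		// arguments, so an instantiated generic such as List[int] (a
+		// hypothetical name) renders as List[int] rather than bare List.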
+		if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok {
+			if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 {
+				var indices []ast.Expr
+				for i := range typeArgs.Len() {
+					indices = append(indices, TypeExpr(typeArgs.At(i), qual))
+				}
+				expr = &ast.IndexListExpr{
+					X:       expr,
+					Indices: indices,
+				}
+			}
+		}
+
+		return expr
+
+	case *types.Struct:
+		return ast.NewIdent(t.String())
+
+	case *types.Interface:
+		return ast.NewIdent(t.String())
+
+	case *types.Union:
+		if t.Len() == 0 {
+			panic("Union type should have at least one term")
+		}
+		// Same as go/ast, the return expression will put last term in the
+		// Y field at topmost level of BinaryExpr.
+		// For union of type "float32 | float64 | int64", the structure looks
+		// similar to:
+		// {
+		// 	X: {
+		// 		X:  float32,
+		// 		Op: |
+		// 		Y:  float64,
+		// 	}
+		// 	Op: |,
+		// 	Y:  int64,
+		// }
+		var union ast.Expr
+		for i := range t.Len() {
+			term := t.Term(i)
+			termExpr := TypeExpr(term.Type(), qual)
+			if term.Tilde() {
+				termExpr = &ast.UnaryExpr{
+					Op: token.TILDE,
+					X:  termExpr,
+				}
+			}
+			if i == 0 {
+				union = termExpr
+			} else {
+				union = &ast.BinaryExpr{
+					X:  union,
+					Op: token.OR,
+					Y:  termExpr,
+				}
+			}
+		}
+		return union
+
+	case *types.Tuple:
+		panic("invalid input type types.Tuple")
+
+	default:
+		panic("unreachable")
+	}
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/versions/features.go b/openshift/vendor/golang.org/x/tools/internal/versions/features.go
new file mode 100644
index 0000000000..b53f178616
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/versions/features.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+// This file contains predicates for working with file versions to
+// decide when a tool should consider a language feature enabled.
+
+// Go versions that features in x/tools can be gated on.
+const (
+	Go1_18 = "go1.18"
+	Go1_19 = "go1.19"
+	Go1_20 = "go1.20"
+	Go1_21 = "go1.21"
+	Go1_22 = "go1.22"
+)
+
+// Future is an invalid unknown Go version sometime in the future.
+// Do not use directly with Compare.
+const Future = ""
+
+// AtLeast reports whether the file version v comes after a Go release.
+//
+// Use this predicate to enable a behavior once a certain Go release
+// has happened (and stays enabled in the future).
+func AtLeast(v, release string) bool {
+	if v == Future {
+		return true // an unknown future version is always after release.
+	}
+	return Compare(Lang(v), Lang(release)) >= 0
+}
+
+// Before reports whether the file version v is strictly before a Go release.
+//
+// Use this predicate to disable a behavior once a certain Go release
+// has happened (and stays disabled in the future).
+func Before(v, release string) bool {
+	if v == Future {
+		return false // an unknown future version happens after release.
+	}
+	return Compare(Lang(v), Lang(release)) < 0
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/versions/gover.go b/openshift/vendor/golang.org/x/tools/internal/versions/gover.go
new file mode 100644
index 0000000000..bbabcd22e9
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/versions/gover.go
@@ -0,0 +1,172 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a fork of internal/gover for use by x/tools until
+// go1.21 and earlier are no longer supported by x/tools.
+
+package versions
+
+import "strings"
+
+// A gover is a parsed Go version: major[.minor[.patch]][kind[pre]]
+// The numbers are the original decimal strings to avoid integer overflows
+// and since there is very little actual math. (Probably overflow doesn't matter in practice,
+// but at the time this code was written, there was an existing test that used
+// go1.99999999999, which does not fit in an int on 32-bit platforms.
+// The "big decimal" representation avoids the problem entirely.)
+type gover struct {
+	major string // decimal
+	minor string // decimal or ""
+	patch string // decimal or ""
+	kind  string // "", "alpha", "beta", "rc"
+	pre   string // decimal or ""
+}
+
+// compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as toolchain versions.
+// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21".
+// Malformed versions compare less than well-formed versions and equal to each other.
+// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
+func compare(x, y string) int {
+	vx := parse(x)
+	vy := parse(y)
+
+	if c := cmpInt(vx.major, vy.major); c != 0 {
+		return c
+	}
+	if c := cmpInt(vx.minor, vy.minor); c != 0 {
+		return c
+	}
+	if c := cmpInt(vx.patch, vy.patch); c != 0 {
+		return c
+	}
+	if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc
+		return c
+	}
+	if c := cmpInt(vx.pre, vy.pre); c != 0 {
+		return c
+	}
+	return 0
+}
+
+// lang returns the Go language version. For example, lang("1.2.3") == "1.2".
+func lang(x string) string {
+	v := parse(x)
+	if v.minor == "" || v.major == "1" && v.minor == "0" {
+		return v.major
+	}
+	return v.major + "." + v.minor
+}
+
+// isValid reports whether the version x is valid.
+func isValid(x string) bool {
+	return parse(x) != gover{}
+}
+
+// parse parses the Go version string x into a version.
+// It returns the zero version if x is malformed.
+func parse(x string) gover {
+	var v gover
+
+	// Parse major version.
+	var ok bool
+	v.major, x, ok = cutInt(x)
+	if !ok {
+		return gover{}
+	}
+	if x == "" {
+		// Interpret "1" as "1.0.0".
+		v.minor = "0"
+		v.patch = "0"
+		return v
+	}
+
+	// Parse . before minor version.
+	if x[0] != '.' {
+		return gover{}
+	}
+
+	// Parse minor version.
+	v.minor, x, ok = cutInt(x[1:])
+	if !ok {
+		return gover{}
+	}
+	if x == "" {
+		// Patch missing is same as "0" for older versions.
+		// Starting in Go 1.21, patch missing is different from explicit .0.
+		if cmpInt(v.minor, "21") < 0 {
+			v.patch = "0"
+		}
+		return v
+	}
+
+	// Parse patch if present.
+	if x[0] == '.' {
+		v.patch, x, ok = cutInt(x[1:])
+		if !ok || x != "" {
+			// Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != "").
+			// Allowing them would be a bit confusing because we already have:
+			//	1.21 < 1.21rc1
+			// But a prerelease of a patch would have the opposite effect:
+			//	1.21.3rc1 < 1.21.3
+			// We've never needed them before, so let's not start now.
+			return gover{}
+		}
+		return v
+	}
+
+	// Parse prerelease.
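+	// For example, in "1.21rc1" the suffix "rc1" is split into kind "rc"
+	// and pre "1" by the scan below.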
+	i := 0
+	for i < len(x) && (x[i] < '0' || '9' < x[i]) {
+		if x[i] < 'a' || 'z' < x[i] {
+			return gover{}
+		}
+		i++
+	}
+	if i == 0 {
+		return gover{}
+	}
+	v.kind, x = x[:i], x[i:]
+	if x == "" {
+		return v
+	}
+	v.pre, x, ok = cutInt(x)
+	if !ok || x != "" {
+		return gover{}
+	}
+
+	return v
+}
+
+// cutInt scans the leading decimal number at the start of x to an integer
+// and returns that value and the rest of the string.
+func cutInt(x string) (n, rest string, ok bool) {
+	i := 0
+	for i < len(x) && '0' <= x[i] && x[i] <= '9' {
+		i++
+	}
+	if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero
+		return "", "", false
+	}
+	return x[:i], x[i:], true
+}
+
+// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers.
+// (Copied from golang.org/x/mod/semver's compareInt.)
+func cmpInt(x, y string) int {
+	if x == y {
+		return 0
+	}
+	if len(x) < len(y) {
+		return -1
+	}
+	if len(x) > len(y) {
+		return +1
+	}
+	if x < y {
+		return -1
+	} else {
+		return +1
+	}
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/versions/types.go b/openshift/vendor/golang.org/x/tools/internal/versions/types.go
new file mode 100644
index 0000000000..0fc10ce4eb
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/versions/types.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// FileVersion returns a file's Go version.
+// The reported version is an unknown Future version if a
+// version cannot be determined.
+func FileVersion(info *types.Info, file *ast.File) string {
+	// In tools built with Go >= 1.22, the Go version of a file
+	// follows a cascade of sources:
+	// 1) types.Info.FileVersions, which follows the cascade:
+	//    1.a) file version (ast.File.GoVersion), or
+	//    1.b) the package version (types.Config.GoVersion), or
+	// 2) is some unknown Future version.
+	//
+	// File versions require a valid package version to be provided to types
+	// in Config.GoVersion. Config.GoVersion is either from the package's module
+	// or the toolchain (go run). This value should be provided by go/packages
+	// or unitchecker.Config.GoVersion.
+	if v := info.FileVersions[file]; IsValid(v) {
+		return v
+	}
+	// Note: we could instead return runtime.Version() [if valid].
+	// This would act as a max version on what a tool can support.
+	return Future
+}
diff --git a/openshift/vendor/golang.org/x/tools/internal/versions/versions.go b/openshift/vendor/golang.org/x/tools/internal/versions/versions.go
new file mode 100644
index 0000000000..8d1f7453db
--- /dev/null
+++ b/openshift/vendor/golang.org/x/tools/internal/versions/versions.go
@@ -0,0 +1,57 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import (
+	"strings"
+)
+
+// Note: If we use build tags to use go/versions when go >=1.22,
+// we run into go.dev/issue/53737. Under some operations users would see an
+// import of "go/versions" even if they would not compile the file.
+// For example, this happens during `go get -u ./...` (go.dev/issue/64490).
+// For this reason, this library is just a clone of go/versions for the moment.
+
+// Lang returns the Go language version for version x.
+// If x is not a valid version, Lang returns the empty string.
+// For example: +// +// Lang("go1.21rc2") = "go1.21" +// Lang("go1.21.2") = "go1.21" +// Lang("go1.21") = "go1.21" +// Lang("go1") = "go1" +// Lang("bad") = "" +// Lang("1.21") = "" +func Lang(x string) string { + v := lang(stripGo(x)) + if v == "" { + return "" + } + return x[:2+len(v)] // "go"+v without allocation +} + +// Compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as Go versions. +// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". +// Invalid versions, including the empty string, compare less than +// valid versions and equal to each other. +// The language version "go1.21" compares less than the +// release candidate and eventual releases "go1.21rc1" and "go1.21.0". +// Custom toolchain suffixes are ignored during comparison: +// "go1.21.0" and "go1.21.0-bigcorp" are equal. +func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) } + +// IsValid reports whether the version x is valid. +func IsValid(x string) bool { return isValid(stripGo(x)) } + +// stripGo converts from a "go1.21" version to a "1.21" version. +// If v does not start with "go", stripGo returns the empty string (a known invalid version). +func stripGo(v string) string { + v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix. + if len(v) < 2 || v[:2] != "go" { + return "" + } + return v[2:] +} diff --git a/openshift/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/openshift/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index e942bc983e..743bfb81d6 100644 --- a/openshift/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/openshift/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -371,7 +371,31 @@ func ConsumeVarint(b []byte) (v uint64, n int) { func SizeVarint(v uint64) int { // This computes 1 + (bits.Len64(v)-1)/7. // 9/64 is a good enough approximation of 1/7 - return int(9*uint32(bits.Len64(v))+64) / 64 + // + // The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT + // instruction, which is very fast on CPUs from the last few years. The + // specific way of expressing the calculation matches C++ Protobuf, see + // https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang + // optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell). + + // By OR'ing v with 1, we guarantee that v is never 0, without changing the + // result of SizeVarint. LZCNT is not defined for 0, meaning the compiler + // needs to add extra instructions to handle that case. + // + // The Go compiler currently (go1.24.4) does not make use of this knowledge. + // This opportunity (removing the XOR instruction, which handles the 0 case) + // results in a small (1%) performance win across CPU architectures. + // + // Independently of avoiding the 0 case, we need the v |= 1 line because + // it allows the Go compiler to eliminate an extra XCHGL barrier. + v |= 1 + + // It would be clearer to write log2value := 63 - uint32(...), but + // writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel). + // Proof of identity for our value range [0..63]: + // https://go.dev/play/p/Pdn9hEWYakX + log2value := uint32(bits.LeadingZeros64(v)) ^ 63 + return int((log2value*9 + (64 + 9)) / 64) } // AppendFixed32 appends v to b as a little-endian uint32. 
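As a hedged aside (not part of the upstream patch), the arithmetic in SizeVarint above can be spot-checked against the definitional one-byte-per-7-bits loop. The sketch below introduces the illustrative names naiveSize and patchedSize and assumes nothing beyond the Go standard library.

package main

import (
	"fmt"
	"math/bits"
)

// naiveSize counts varint bytes by definition: one byte per 7 bits.
func naiveSize(v uint64) int {
	n := 1
	for v >>= 7; v != 0; v >>= 7 {
		n++
	}
	return n
}

// patchedSize mirrors the new SizeVarint: v|=1 sidesteps LZCNT's undefined
// result for 0, and x^63 equals 63-x for x in [0,63], so log2value is
// bits.Len64(v)-1.
func patchedSize(v uint64) int {
	v |= 1
	log2value := uint32(bits.LeadingZeros64(v)) ^ 63
	return int((log2value*9 + (64 + 9)) / 64)
}

func main() {
	for _, v := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63, 1<<64 - 1} {
		fmt.Println(v, naiveSize(v), patchedSize(v)) // the two sizes always agree
	}
}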
diff --git a/openshift/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/openshift/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 5a57ef6f3c..04696351ee 100644 Binary files a/openshift/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/openshift/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/openshift/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/openshift/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 10132c9b38..a0aad2777f 100644 --- a/openshift/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/openshift/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -69,6 +69,12 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value case genid.FeatureSet_JsonFormat_field_number: parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + case genid.FeatureSet_EnforceNamingStyle_field_number: + // EnforceNamingStyle is enforced in protoc, languages other than C++ + // are not supposed to do anything with this feature. + case genid.FeatureSet_DefaultSymbolVisibility_field_number: + // DefaultSymbolVisibility is enforced in protoc, runtimes should not + // inspect this value. default: panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) } diff --git a/openshift/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/openshift/vendor/google.golang.org/protobuf/internal/filedesc/presence.go new file mode 100644 index 0000000000..a12ec9791c --- /dev/null +++ b/openshift/vendor/google.golang.org/protobuf/internal/filedesc/presence.go @@ -0,0 +1,33 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import "google.golang.org/protobuf/reflect/protoreflect" + +// UsePresenceForField reports whether the presence bitmap should be used for +// the specified field. +func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + // Oneof fields never use the presence bitmap. + // + // Synthetic oneofs are an exception: Those are used to implement proto3 + // optional fields and hence should follow non-oneof field semantics. + return false, false + + case fd.IsMap(): + // Map-typed fields never use the presence bitmap. + return false, false + + case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: + // Lazy fields always use the presence bitmap (only messages can be lazy). + isLazy := fd.(interface{ IsLazy() bool }).IsLazy() + return isLazy, isLazy + + default: + // If the field has presence, use the presence bitmap. 
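+		// Such fields can never be lazy: per the case above, laziness is
+		// limited to message and group kinds.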
+ return fd.HasPresence(), false + } +} diff --git a/openshift/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/openshift/vendor/google.golang.org/protobuf/internal/genid/api_gen.go index df8f918501..3ceb6fa7f5 100644 --- a/openshift/vendor/google.golang.org/protobuf/internal/genid/api_gen.go +++ b/openshift/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -27,6 +27,7 @@ const ( Api_SourceContext_field_name protoreflect.Name = "source_context" Api_Mixins_field_name protoreflect.Name = "mixins" Api_Syntax_field_name protoreflect.Name = "syntax" + Api_Edition_field_name protoreflect.Name = "edition" Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" @@ -35,6 +36,7 @@ const ( Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" + Api_Edition_field_fullname protoreflect.FullName = "google.protobuf.Api.edition" ) // Field numbers for google.protobuf.Api. @@ -46,6 +48,7 @@ const ( Api_SourceContext_field_number protoreflect.FieldNumber = 5 Api_Mixins_field_number protoreflect.FieldNumber = 6 Api_Syntax_field_number protoreflect.FieldNumber = 7 + Api_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Method. @@ -63,6 +66,7 @@ const ( Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" Method_Options_field_name protoreflect.Name = "options" Method_Syntax_field_name protoreflect.Name = "syntax" + Method_Edition_field_name protoreflect.Name = "edition" Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" @@ -71,6 +75,7 @@ const ( Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" + Method_Edition_field_fullname protoreflect.FullName = "google.protobuf.Method.edition" ) // Field numbers for google.protobuf.Method. @@ -82,6 +87,7 @@ const ( Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 Method_Options_field_number protoreflect.FieldNumber = 6 Method_Syntax_field_number protoreflect.FieldNumber = 7 + Method_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Mixin. diff --git a/openshift/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/openshift/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index f30ab6b586..950a6a325a 100644 --- a/openshift/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/openshift/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -34,6 +34,19 @@ const ( Edition_EDITION_MAX_enum_value = 2147483647 ) +// Full and short names for google.protobuf.SymbolVisibility. +const ( + SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility" + SymbolVisibility_enum_name = "SymbolVisibility" +) + +// Enum values for google.protobuf.SymbolVisibility. 
+const ( + SymbolVisibility_VISIBILITY_UNSET_enum_value = 0 + SymbolVisibility_VISIBILITY_LOCAL_enum_value = 1 + SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2 +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -65,6 +78,7 @@ const ( FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency" FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" FileDescriptorProto_Service_field_name protoreflect.Name = "service" @@ -79,6 +93,7 @@ const ( FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency" FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" @@ -96,6 +111,7 @@ const ( FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15 FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 @@ -124,6 +140,7 @@ const ( DescriptorProto_Options_field_name protoreflect.Name = "options" DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + DescriptorProto_Visibility_field_name protoreflect.Name = "visibility" DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" @@ -135,6 +152,7 @@ const ( DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" + DescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.visibility" ) // Field numbers for google.protobuf.DescriptorProto. 
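[The genid constants above only name the new descriptor fields. The user-facing surface appears later in this diff as the descriptorpb.SymbolVisibility enum (UNSET=0, LOCAL=1, EXPORT=2) and the new Visibility fields on DescriptorProto and EnumDescriptorProto. As a rough sketch of how a consumer might branch on those values once this bump lands; describeVisibility is a hypothetical helper, not part of the vendored code:]

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// describeVisibility is a hypothetical helper that maps the three
// SymbolVisibility values introduced by this bump to human-readable text.
func describeVisibility(d *descriptorpb.DescriptorProto) string {
	switch d.GetVisibility() {
	case descriptorpb.SymbolVisibility_VISIBILITY_LOCAL:
		return d.GetName() + ": local, cannot be imported by other files"
	case descriptorpb.SymbolVisibility_VISIBILITY_EXPORT:
		return d.GetName() + ": exported, importable by other files"
	default:
		// VISIBILITY_UNSET: the edition's default_symbol_visibility applies.
		return d.GetName() + ": visibility unset"
	}
}

func main() {
	d := &descriptorpb.DescriptorProto{
		Name:       proto.String("Example"),
		Visibility: descriptorpb.SymbolVisibility_VISIBILITY_LOCAL.Enum(),
	}
	fmt.Println(describeVisibility(d))
}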
@@ -149,6 +167,7 @@ const (
 	DescriptorProto_Options_field_number protoreflect.FieldNumber = 7
 	DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9
 	DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10
+	DescriptorProto_Visibility_field_number protoreflect.FieldNumber = 11
 )
 
 // Names for google.protobuf.DescriptorProto.ExtensionRange.
@@ -388,12 +407,14 @@ const (
 	EnumDescriptorProto_Options_field_name protoreflect.Name = "options"
 	EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
 	EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name"
+	EnumDescriptorProto_Visibility_field_name protoreflect.Name = "visibility"
 
 	EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name"
 	EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value"
 	EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options"
 	EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range"
 	EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name"
+	EnumDescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility"
 )
 
 // Field numbers for google.protobuf.EnumDescriptorProto.
@@ -403,6 +424,7 @@ const (
 	EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3
 	EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4
 	EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5
+	EnumDescriptorProto_Visibility_field_number protoreflect.FieldNumber = 6
 )
 
 // Names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
@@ -1008,29 +1030,35 @@ const (
 
 // Field names for google.protobuf.FeatureSet.
 const (
-	FeatureSet_FieldPresence_field_name         protoreflect.Name = "field_presence"
-	FeatureSet_EnumType_field_name              protoreflect.Name = "enum_type"
-	FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
-	FeatureSet_Utf8Validation_field_name        protoreflect.Name = "utf8_validation"
-	FeatureSet_MessageEncoding_field_name       protoreflect.Name = "message_encoding"
-	FeatureSet_JsonFormat_field_name            protoreflect.Name = "json_format"
-
-	FeatureSet_FieldPresence_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
-	FeatureSet_EnumType_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
-	FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
-	FeatureSet_Utf8Validation_field_fullname        protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
-	FeatureSet_MessageEncoding_field_fullname       protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
-	FeatureSet_JsonFormat_field_fullname            protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+	FeatureSet_FieldPresence_field_name           protoreflect.Name = "field_presence"
+	FeatureSet_EnumType_field_name                protoreflect.Name = "enum_type"
+	FeatureSet_RepeatedFieldEncoding_field_name   protoreflect.Name = "repeated_field_encoding"
+	FeatureSet_Utf8Validation_field_name          protoreflect.Name = "utf8_validation"
+	FeatureSet_MessageEncoding_field_name         protoreflect.Name = "message_encoding"
+	FeatureSet_JsonFormat_field_name              protoreflect.Name = "json_format"
+	FeatureSet_EnforceNamingStyle_field_name      protoreflect.Name = "enforce_naming_style"
+	FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility"
+
+	FeatureSet_FieldPresence_field_fullname           protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
+	FeatureSet_EnumType_field_fullname                protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
+	FeatureSet_RepeatedFieldEncoding_field_fullname   protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
+	FeatureSet_Utf8Validation_field_fullname          protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
+	FeatureSet_MessageEncoding_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
+	FeatureSet_JsonFormat_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+	FeatureSet_EnforceNamingStyle_field_fullname      protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style"
+	FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility"
 )
 
 // Field numbers for google.protobuf.FeatureSet.
 const (
-	FeatureSet_FieldPresence_field_number         protoreflect.FieldNumber = 1
-	FeatureSet_EnumType_field_number              protoreflect.FieldNumber = 2
-	FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
-	FeatureSet_Utf8Validation_field_number        protoreflect.FieldNumber = 4
-	FeatureSet_MessageEncoding_field_number       protoreflect.FieldNumber = 5
-	FeatureSet_JsonFormat_field_number            protoreflect.FieldNumber = 6
+	FeatureSet_FieldPresence_field_number           protoreflect.FieldNumber = 1
+	FeatureSet_EnumType_field_number                protoreflect.FieldNumber = 2
+	FeatureSet_RepeatedFieldEncoding_field_number   protoreflect.FieldNumber = 3
+	FeatureSet_Utf8Validation_field_number          protoreflect.FieldNumber = 4
+	FeatureSet_MessageEncoding_field_number         protoreflect.FieldNumber = 5
+	FeatureSet_JsonFormat_field_number              protoreflect.FieldNumber = 6
+	FeatureSet_EnforceNamingStyle_field_number      protoreflect.FieldNumber = 7
+	FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8
 )
 
 // Full and short names for google.protobuf.FeatureSet.FieldPresence.
@@ -1112,6 +1140,40 @@ const (
 	FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2
 )
 
+// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+	FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle"
+	FeatureSet_EnforceNamingStyle_enum_name = "EnforceNamingStyle"
+)
+
+// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+	FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0
+	FeatureSet_STYLE2024_enum_value = 1
+	FeatureSet_STYLE_LEGACY_enum_value = 2
+)
+
+// Names for google.protobuf.FeatureSet.VisibilityFeature.
+const (
+	FeatureSet_VisibilityFeature_message_name protoreflect.Name = "VisibilityFeature"
+	FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature"
+)
+
+// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility"
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name = "DefaultSymbolVisibility"
+)
+
+// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+	FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0
+	FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value = 1
+	FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value = 2
+	FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value = 3
+	FeatureSet_VisibilityFeature_STRICT_enum_value = 4
+)
+
 // Names for google.protobuf.FeatureSetDefaults.
 const (
 	FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults"
diff --git a/openshift/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/openshift/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
index 41c1f74ef8..bdad12a9bb 100644
--- a/openshift/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
+++ b/openshift/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -11,6 +11,7 @@ import (
 
 	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/encoding/messageset"
+	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/internal/order"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	piface "google.golang.org/protobuf/runtime/protoiface"
@@ -80,7 +81,7 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf
 		// permit us to skip over definitely-unset fields at marshal time.
 
 		var hasPresence bool
-		hasPresence, cf.isLazy = usePresenceForField(si, fd)
+		hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd)
 
 		if hasPresence {
 			cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
diff --git a/openshift/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/openshift/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
index dd55e8e009..5a439daacb 100644
--- a/openshift/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
+++ b/openshift/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -11,6 +11,7 @@ import (
 	"strings"
 	"sync/atomic"
 
+	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/reflect/protoreflect"
 )
 
@@ -53,7 +54,7 @@ func opaqueInitHook(mi *MessageInfo) bool {
 		fd := fds.Get(i)
 		fs := si.fieldsByNumber[fd.Number()]
 		var fi fieldInfo
-		usePresence, _ := usePresenceForField(si, fd)
+		usePresence, _ := filedesc.UsePresenceForField(fd)
 
 		switch {
 		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
@@ -343,17 +344,15 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
 			if p.IsNil() {
 				return false
 			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
 				return false
 			}
-			rv := sp.AsValueOf(fs.Type.Elem())
 			return rv.Elem().Len() > 0
 		},
 		clear: func(p pointer) {
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if !sp.IsNil() {
-				rv := sp.AsValueOf(fs.Type.Elem())
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if !rv.IsNil() {
 				rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
 			}
 		},
@@ -361,11 +360,10 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
 			if p.IsNil() {
 				return conv.Zero()
 			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
 				return conv.Zero()
 			}
-			rv := sp.AsValueOf(fs.Type.Elem())
 			if rv.Elem().Len() == 0 {
 				return conv.Zero()
 			}
@@ -598,30 +596,3 @@ func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
 func (mi *MessageInfo) present(p pointer, index uint32) bool {
 	return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
 }
-
-// usePresenceForField implements the somewhat intricate logic of when
-// the presence bitmap is used for a field. The main logic is that a
-// field that is optional or that can be lazy will use the presence
-// bit, but for proto2, also maps have a presence bit. It also records
-// if the field can ever be lazy, which is true if we have a
-// lazyOffset and the field is a message or a slice of messages. A
-// field that is lazy will always need a presence bit. Oneofs are not
-// lazy and do not use presence, unless they are a synthetic oneof,
-// which is a proto3 optional field. For proto3 optionals, we use the
-// presence and they can also be lazy when applicable (a message).
-func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
-	hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
-
-	// Non-oneof scalar fields with explicit field presence use the presence array.
-	usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
-	switch {
-	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
-		return false, false
-	case fd.IsMap():
-		return false, false
-	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
-		return hasLazyField, hasLazyField
-	default:
-		return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
-	}
-}
diff --git a/openshift/vendor/google.golang.org/protobuf/internal/impl/presence.go b/openshift/vendor/google.golang.org/protobuf/internal/impl/presence.go
index 914cb1deda..443afe81cd 100644
--- a/openshift/vendor/google.golang.org/protobuf/internal/impl/presence.go
+++ b/openshift/vendor/google.golang.org/protobuf/internal/impl/presence.go
@@ -32,9 +32,6 @@ func (p presence) toElem(num uint32) (ret *uint32) {
 
 // Present checks for the presence of a specific field number in a presence set.
 func (p presence) Present(num uint32) bool {
-	if p.P == nil {
-		return false
-	}
 	return Export{}.Present(p.toElem(num), num)
 }
 
diff --git a/openshift/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/openshift/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
similarity index 99%
rename from openshift/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
rename to openshift/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
index 1ffddf6877..42dd6f70c6 100644
--- a/openshift/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ b/openshift/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build go1.21
-
 package strs
 
 import (
diff --git a/openshift/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/openshift/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
deleted file mode 100644
index 832a7988f1..0000000000
--- a/openshift/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.21
-
-package strs
-
-import (
-	"unsafe"
-
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-type (
-	stringHeader struct {
-		Data unsafe.Pointer
-		Len int
-	}
-	sliceHeader struct {
-		Data unsafe.Pointer
-		Len int
-		Cap int
-	}
-)
-
-// UnsafeString returns an unsafe string reference of b.
-// The caller must treat the input slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user
-// unless the input slice is provably immutable.
-func UnsafeString(b []byte) (s string) {
-	src := (*sliceHeader)(unsafe.Pointer(&b))
-	dst := (*stringHeader)(unsafe.Pointer(&s))
-	dst.Data = src.Data
-	dst.Len = src.Len
-	return s
-}
-
-// UnsafeBytes returns an unsafe bytes slice reference of s.
-// The caller must treat returned slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user.
-func UnsafeBytes(s string) (b []byte) {
-	src := (*stringHeader)(unsafe.Pointer(&s))
-	dst := (*sliceHeader)(unsafe.Pointer(&b))
-	dst.Data = src.Data
-	dst.Len = src.Len
-	dst.Cap = src.Len
-	return b
-}
-
-// Builder builds a set of strings with shared lifetime.
-// This differs from strings.Builder, which is for building a single string.
-type Builder struct {
-	buf []byte
-}
-
-// AppendFullName is equivalent to protoreflect.FullName.Append,
-// but optimized for large batches where each name has a shared lifetime.
-func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
-	n := len(prefix) + len(".") + len(name)
-	if len(prefix) == 0 {
-		n -= len(".")
-	}
-	sb.grow(n)
-	sb.buf = append(sb.buf, prefix...)
-	sb.buf = append(sb.buf, '.')
-	sb.buf = append(sb.buf, name...)
-	return protoreflect.FullName(sb.last(n))
-}
-
-// MakeString is equivalent to string(b), but optimized for large batches
-// with a shared lifetime.
-func (sb *Builder) MakeString(b []byte) string {
-	sb.grow(len(b))
-	sb.buf = append(sb.buf, b...)
-	return sb.last(len(b))
-}
-
-func (sb *Builder) grow(n int) {
-	if cap(sb.buf)-len(sb.buf) >= n {
-		return
-	}
-
-	// Unlike strings.Builder, we do not need to copy over the contents
-	// of the old buffer since our builder provides no API for
-	// retrieving previously created strings.
-	sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
-}
-
-func (sb *Builder) last(n int) string {
-	return UnsafeString(sb.buf[len(sb.buf)-n:])
-}
diff --git a/openshift/vendor/google.golang.org/protobuf/internal/version/version.go b/openshift/vendor/google.golang.org/protobuf/internal/version/version.go
index 01efc33030..697d1c14f3 100644
--- a/openshift/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/openshift/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
 const (
 	Major = 1
 	Minor = 36
-	Patch = 5
+	Patch = 8
 	PreRelease = ""
 )
 
diff --git a/openshift/vendor/google.golang.org/protobuf/proto/merge.go b/openshift/vendor/google.golang.org/protobuf/proto/merge.go
index 3c6fe57807..ef55b97dde 100644
--- a/openshift/vendor/google.golang.org/protobuf/proto/merge.go
+++ b/openshift/vendor/google.golang.org/protobuf/proto/merge.go
@@ -59,6 +59,12 @@ func Clone(m Message) Message {
 	return dst.Interface()
 }
 
+// CloneOf returns a deep copy of m. If the top-level message is invalid,
+// it returns an invalid message as well.
+func CloneOf[M Message](m M) M {
+	return Clone(m).(M)
+}
+
 // mergeOptions provides a namespace for merge functions, and can be
 // exported in the future if we add user-visible merge options.
 type mergeOptions struct{}
diff --git a/openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index ea154eec44..730331e666 100644
--- a/openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -21,6 +21,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
 		b = p.appendRepeatedField(b, "public_dependency", nil)
 	case 11:
 		b = p.appendRepeatedField(b, "weak_dependency", nil)
+	case 15:
+		b = p.appendRepeatedField(b, "option_dependency", nil)
 	case 4:
 		b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto)
 	case 5:
@@ -66,6 +68,8 @@ func (p *SourcePath) appendDescriptorProto(b []byte) []byte {
 		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange)
 	case 10:
 		b = p.appendRepeatedField(b, "reserved_name", nil)
+	case 11:
+		b = p.appendSingularField(b, "visibility", nil)
 	}
 	return b
 }
@@ -85,6 +89,8 @@ func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte {
 		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange)
 	case 5:
 		b = p.appendRepeatedField(b, "reserved_name", nil)
+	case 6:
+		b = p.appendSingularField(b, "visibility", nil)
 	}
 	return b
 }
@@ -398,6 +404,10 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte {
 		b = p.appendSingularField(b, "message_encoding", nil)
 	case 6:
 		b = p.appendSingularField(b, "json_format", nil)
+	case 7:
+		b = p.appendSingularField(b, "enforce_naming_style", nil)
+	case 8:
+		b = p.appendSingularField(b, "default_symbol_visibility", nil)
 	}
 	return b
 }
diff --git a/openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
similarity index 99%
rename from openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
rename to openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
index 479527b58d..fe17f37220 100644
--- a/openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build go1.21
-
 package protoreflect
 
 import (
diff --git a/openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
deleted file mode 100644
index 0015fcb35d..0000000000
--- a/openshift/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.21
-
-package protoreflect
-
-import (
-	"unsafe"
-
-	"google.golang.org/protobuf/internal/pragma"
-)
-
-type (
-	stringHeader struct {
-		Data unsafe.Pointer
-		Len int
-	}
-	sliceHeader struct {
-		Data unsafe.Pointer
-		Len int
-		Cap int
-	}
-	ifaceHeader struct {
-		Type unsafe.Pointer
-		Data unsafe.Pointer
-	}
-)
-
-var (
-	nilType = typeOf(nil)
-	boolType = typeOf(*new(bool))
-	int32Type = typeOf(*new(int32))
-	int64Type = typeOf(*new(int64))
-	uint32Type = typeOf(*new(uint32))
-	uint64Type = typeOf(*new(uint64))
-	float32Type = typeOf(*new(float32))
-	float64Type = typeOf(*new(float64))
-	stringType = typeOf(*new(string))
-	bytesType = typeOf(*new([]byte))
-	enumType = typeOf(*new(EnumNumber))
-)
-
-// typeOf returns a pointer to the Go type information.
-// The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t any) unsafe.Pointer {
-	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
-}
-
-// value is a union where only one type can be represented at a time.
-// The struct is 24B large on 64-bit systems and requires the minimum storage
-// necessary to represent each possible type.
-//
-// The Go GC needs to be able to scan variables containing pointers.
-// As such, pointers and non-pointers cannot be intermixed.
-type value struct {
-	pragma.DoNotCompare // 0B
-
-	// typ stores the type of the value as a pointer to the Go type.
-	typ unsafe.Pointer // 8B
-
-	// ptr stores the data pointer for a String, Bytes, or interface value.
-	ptr unsafe.Pointer // 8B
-
-	// num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
-	// Enum value as a raw uint64.
-	//
-	// It is also used to store the length of a String or Bytes value;
-	// the capacity is ignored.
-	num uint64 // 8B
-}
-
-func valueOfString(v string) Value {
-	p := (*stringHeader)(unsafe.Pointer(&v))
-	return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfBytes(v []byte) Value {
-	p := (*sliceHeader)(unsafe.Pointer(&v))
-	return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfIface(v any) Value {
-	p := (*ifaceHeader)(unsafe.Pointer(&v))
-	return Value{typ: p.Type, ptr: p.Data}
-}
-
-func (v Value) getString() (x string) {
-	*(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)}
-	return x
-}
-func (v Value) getBytes() (x []byte) {
-	*(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
-	return x
-}
-func (v Value) getIface() (x any) {
-	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
-	return x
-}
diff --git a/openshift/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/openshift/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index a516337674..4eacb523c3 100644
--- a/openshift/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/openshift/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -151,6 +151,70 @@ func (Edition) EnumDescriptor() ([]byte, []int) {
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
 }
 
+// Describes the 'visibility' of a symbol with respect to the proto import
+// system. Symbols can only be imported when the visibility rules do not prevent
+// it (ex: local symbols cannot be imported). Visibility modifiers can only be
+// set on `message` and `enum` as they are the only types available to be
+// referenced from other files.
+type SymbolVisibility int32
+
+const (
+	SymbolVisibility_VISIBILITY_UNSET SymbolVisibility = 0
+	SymbolVisibility_VISIBILITY_LOCAL SymbolVisibility = 1
+	SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2
+)
+
+// Enum value maps for SymbolVisibility.
+var (
+	SymbolVisibility_name = map[int32]string{
+		0: "VISIBILITY_UNSET",
+		1: "VISIBILITY_LOCAL",
+		2: "VISIBILITY_EXPORT",
+	}
+	SymbolVisibility_value = map[string]int32{
+		"VISIBILITY_UNSET": 0,
+		"VISIBILITY_LOCAL": 1,
+		"VISIBILITY_EXPORT": 2,
+	}
+)
+
+func (x SymbolVisibility) Enum() *SymbolVisibility {
+	p := new(SymbolVisibility)
+	*p = x
+	return p
+}
+
+func (x SymbolVisibility) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+}
+
+func (SymbolVisibility) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[1]
+}
+
+func (x SymbolVisibility) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *SymbolVisibility) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = SymbolVisibility(num)
+	return nil
+}
+
+// Deprecated: Use SymbolVisibility.Descriptor instead.
+func (SymbolVisibility) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1}
+}
+
 // The verification state of the extension range.
 type ExtensionRangeOptions_VerificationState int32
 
@@ -183,11 +247,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string {
 }
 
 func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
 }
 
 func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[1]
+	return &file_google_protobuf_descriptor_proto_enumTypes[2]
 }
 
 func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
@@ -299,11 +363,11 @@ func (x FieldDescriptorProto_Type) String() string {
 }
 
 func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
 }
 
 func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[2]
+	return &file_google_protobuf_descriptor_proto_enumTypes[3]
 }
 
 func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
@@ -362,11 +426,11 @@ func (x FieldDescriptorProto_Label) String() string {
 }
 
 func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
 }
 
 func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[3]
+	return &file_google_protobuf_descriptor_proto_enumTypes[4]
 }
 
 func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
@@ -423,11 +487,11 @@ func (x FileOptions_OptimizeMode) String() string {
 }
 
 func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
 }
 
 func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[4]
+	return &file_google_protobuf_descriptor_proto_enumTypes[5]
 }
 
 func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
@@ -489,11 +553,11 @@ func (x FieldOptions_CType) String() string {
 }
 
 func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
 }
 
 func (FieldOptions_CType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[5]
+	return &file_google_protobuf_descriptor_proto_enumTypes[6]
 }
 
 func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
@@ -551,11 +615,11 @@ func (x FieldOptions_JSType) String() string {
 }
 
 func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
 }
 
 func (FieldOptions_JSType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[6]
+	return &file_google_protobuf_descriptor_proto_enumTypes[7]
 }
 
 func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
@@ -611,11 +675,11 @@ func (x FieldOptions_OptionRetention) String() string {
 }
 
 func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
 }
 
 func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[7]
+	return &file_google_protobuf_descriptor_proto_enumTypes[8]
 }
 
 func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
@@ -694,11 +758,11 @@ func (x FieldOptions_OptionTargetType) String() string {
 }
 
 func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
 }
 
 func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[8]
+	return &file_google_protobuf_descriptor_proto_enumTypes[9]
 }
 
 func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
@@ -756,11 +820,11 @@ func (x MethodOptions_IdempotencyLevel) String() string {
 }
 
 func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
 }
 
 func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[9]
+	return &file_google_protobuf_descriptor_proto_enumTypes[10]
 }
 
 func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
@@ -818,11 +882,11 @@ func (x FeatureSet_FieldPresence) String() string {
 }
 
 func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
 }
 
 func (FeatureSet_FieldPresence) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[10]
+	return &file_google_protobuf_descriptor_proto_enumTypes[11]
 }
 
 func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber {
@@ -877,11 +941,11 @@ func (x FeatureSet_EnumType) String() string {
 }
 
 func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
 }
 
 func (FeatureSet_EnumType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[11]
+	return &file_google_protobuf_descriptor_proto_enumTypes[12]
 }
 
 func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber {
@@ -936,11 +1000,11 @@ func (x FeatureSet_RepeatedFieldEncoding) String() string {
 }
 
 func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
 }
 
 func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[12]
+	return &file_google_protobuf_descriptor_proto_enumTypes[13]
 }
 
 func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber {
@@ -995,11 +1059,11 @@ func (x FeatureSet_Utf8Validation) String() string {
 }
 
 func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
 }
 
 func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[13]
+	return &file_google_protobuf_descriptor_proto_enumTypes[14]
 }
 
 func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber {
@@ -1054,11 +1118,11 @@ func (x FeatureSet_MessageEncoding) String() string {
 }
 
 func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
 }
 
 func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[14]
+	return &file_google_protobuf_descriptor_proto_enumTypes[15]
 }
 
 func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber {
@@ -1113,11 +1177,11 @@ func (x FeatureSet_JsonFormat) String() string {
 }
 
 func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
 }
 
 func (FeatureSet_JsonFormat) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[15]
+	return &file_google_protobuf_descriptor_proto_enumTypes[16]
 }
 
 func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber {
@@ -1139,6 +1203,136 @@ func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) {
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5}
 }
 
+type FeatureSet_EnforceNamingStyle int32
+
+const (
+	FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0
+	FeatureSet_STYLE2024 FeatureSet_EnforceNamingStyle = 1
+	FeatureSet_STYLE_LEGACY FeatureSet_EnforceNamingStyle = 2
+)
+
+// Enum value maps for FeatureSet_EnforceNamingStyle.
+var (
+	FeatureSet_EnforceNamingStyle_name = map[int32]string{
+		0: "ENFORCE_NAMING_STYLE_UNKNOWN",
+		1: "STYLE2024",
+		2: "STYLE_LEGACY",
+	}
+	FeatureSet_EnforceNamingStyle_value = map[string]int32{
+		"ENFORCE_NAMING_STYLE_UNKNOWN": 0,
+		"STYLE2024": 1,
+		"STYLE_LEGACY": 2,
+	}
+)
+
+func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle {
+	p := new(FeatureSet_EnforceNamingStyle)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_EnforceNamingStyle) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor()
+}
+
+func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[17]
+}
+
+func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_EnforceNamingStyle(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead.
+func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6}
+}
+
+type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32
+
+const (
+	FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0
+	// Default pre-EDITION_2024: all UNSET visibility is export.
+	FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1
+	// All top-level symbols default to export, nested default to local.
+	FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2
+	// All symbols default to local.
+	FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3
+	// All symbols local by default. Nested types cannot be exported,
+	// with a special-case caveat for message { enum {} reserved 1 to max; }.
+	// This is the recommended setting for new protos.
+	FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4
+)
+
+// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility.
+var (
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{
+		0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN",
+		1: "EXPORT_ALL",
+		2: "EXPORT_TOP_LEVEL",
+		3: "LOCAL_ALL",
+		4: "STRICT",
+	}
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{
+		"DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0,
+		"EXPORT_ALL": 1,
+		"EXPORT_TOP_LEVEL": 2,
+		"LOCAL_ALL": 3,
+		"STRICT": 4,
+	}
+)
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+	p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor()
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[18]
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead.
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0}
+}
+
 // Represents the identified object's effect on the element in the original
 // .proto file.
 type GeneratedCodeInfo_Annotation_Semantic int32
@@ -1177,11 +1371,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string {
 }
 
 func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor()
 }
 
 func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[16]
+	return &file_google_protobuf_descriptor_proto_enumTypes[19]
 }
 
 func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
@@ -1262,6 +1456,9 @@ type FileDescriptorProto struct {
 	// Indexes of the weak imported files in the dependency list.
 	// For Google-internal migration only. Do not use.
 	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+	// Names of files imported by this file purely for the purpose of providing
+	// option extensions. These are excluded from the dependency list above.
+	OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"`
 	// All top-level definitions in this file.
 	MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
 	EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
@@ -1277,8 +1474,14 @@ type FileDescriptorProto struct {
 	// The supported values are "proto2", "proto3", and "editions".
 	//
 	// If `edition` is present, this value must be "editions".
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
 	// The edition of the proto file.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache protoimpl.SizeCache
@@ -1349,6 +1552,13 @@ func (x *FileDescriptorProto) GetWeakDependency() []int32 {
 	return nil
 }
 
+func (x *FileDescriptorProto) GetOptionDependency() []string {
+	if x != nil {
+		return x.OptionDependency
+	}
+	return nil
+}
+
 func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto {
 	if x != nil {
 		return x.MessageType
@@ -1419,7 +1629,9 @@ type DescriptorProto struct {
 	ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved field names, which may not be used by fields in the same message.
 	// A given name may only be reserved once.
-	ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	// Support for `export` and `local` keywords on enums.
+	Visibility *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache protoimpl.SizeCache
 }
@@ -1524,6 +1736,13 @@ func (x *DescriptorProto) GetReservedName() []string {
 	return nil
 }
 
+func (x *DescriptorProto) GetVisibility() SymbolVisibility {
+	if x != nil && x.Visibility != nil {
+		return *x.Visibility
+	}
+	return SymbolVisibility_VISIBILITY_UNSET
+}
+
 type ExtensionRangeOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The parser stores options it doesn't recognize here. See above.
@@ -1836,7 +2055,9 @@ type EnumDescriptorProto struct {
 	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved enum value names, which may not be reused. A given name may only
 	// be reserved once.
-	ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	// Support for `export` and `local` keywords on enums.
+	Visibility *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache protoimpl.SizeCache
 }
@@ -1906,6 +2127,13 @@ func (x *EnumDescriptorProto) GetReservedName() []string {
 	return nil
 }
 
+func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility {
+	if x != nil && x.Visibility != nil {
+		return *x.Visibility
+	}
+	return SymbolVisibility_VISIBILITY_UNSET
+}
+
 // Describes a value within an enum.
 type EnumValueDescriptorProto struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
@@ -2212,6 +2440,9 @@ type FileOptions struct {
 	// determining the ruby package.
 	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here.
 	// See the documentation for the "Options" section above.
@@ -2482,6 +2713,9 @@ type MessageOptions struct {
 	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2639,7 +2873,10 @@ type FieldOptions struct {
 	// for accessors, or it will be completely ignored; in the very least, this
 	// is a formalization for deprecating fields.
 	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// DEPRECATED. DO NOT USE!
 	// For Google-internal migration only. Do not use.
+	//
+	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
 	// Indicate that the field value should not be printed out when using debug
 	// formats, e.g. when the field contains sensitive credentials.
 	DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
 	Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
 	EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
 	FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
@@ -2740,6 +2980,7 @@ func (x *FieldOptions) GetDeprecated() bool {
 	return Default_FieldOptions_Deprecated
 }
 
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 func (x *FieldOptions) GetWeak() bool {
 	if x != nil && x.Weak != nil {
 		return *x.Weak
@@ -2799,6 +3040,9 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
 type OneofOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2871,6 +3115,9 @@ type EnumOptions struct {
 	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2958,6 +3205,9 @@ type EnumValueOptions struct {
 	// this is a formalization for deprecating enum values.
 	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
 	// Indicate that fields annotated with this enum value should not be printed
 	// out when using debug formats, e.g. when the field contains sensitive
@@ -3046,6 +3296,9 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
 type ServiceOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
 	// Is this service deprecated?
 	// Depending on the target platform, this can emit Deprecated annotations
@@ -3124,6 +3377,9 @@ type MethodOptions struct {
 	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -3303,16 +3559,18 @@ func (x *UninterpretedOption) GetAggregateValue() string {
 // be designed and implemented to handle this, hopefully before we ever hit a
 // conflict here.
 type FeatureSet struct {
-	state                 protoimpl.MessageState            `protogen:"open.v1"`
-	FieldPresence         *FeatureSet_FieldPresence         `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
-	EnumType              *FeatureSet_EnumType              `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
-	RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
-	Utf8Validation        *FeatureSet_Utf8Validation        `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
-	MessageEncoding       *FeatureSet_MessageEncoding       `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
-	JsonFormat            *FeatureSet_JsonFormat            `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
-	extensionFields       protoimpl.ExtensionFields
-	unknownFields         protoimpl.UnknownFields
-	sizeCache             protoimpl.SizeCache
+	state                   protoimpl.MessageState                                `protogen:"open.v1"`
+	FieldPresence           *FeatureSet_FieldPresence                             `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
+	EnumType                *FeatureSet_EnumType                                  `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
+	RepeatedFieldEncoding   *FeatureSet_RepeatedFieldEncoding                     `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
+	Utf8Validation          *FeatureSet_Utf8Validation                            `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
+	MessageEncoding         *FeatureSet_MessageEncoding                           `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
+	JsonFormat              *FeatureSet_JsonFormat                                `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+	EnforceNamingStyle      *FeatureSet_EnforceNamingStyle                        `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
+	DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"`
+	extensionFields         protoimpl.ExtensionFields
+	unknownFields           protoimpl.UnknownFields
+	sizeCache               protoimpl.SizeCache
 }
 
 func (x *FeatureSet) Reset() {
@@ -3387,6 +3645,20 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
 	return FeatureSet_JSON_FORMAT_UNKNOWN
 }
 
+func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle {
+	if x != nil && x.EnforceNamingStyle != nil {
+		return *x.EnforceNamingStyle
+	}
+	return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN
+}
+
+func (x *FeatureSet) GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+	if x != nil && x.DefaultSymbolVisibility != nil {
+		return *x.DefaultSymbolVisibility
+	}
+	return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN
+}
+
 // A compiled specification for the defaults of a set of features. These
 // messages are generated from FeatureSet extensions and can be used to seed
 // feature resolution. The resolution with this object becomes a simple search
@@ -4047,6 +4319,42 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
 	return false
 }
 
+type FeatureSet_VisibilityFeature struct {
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *FeatureSet_VisibilityFeature) Reset() {
+	*x = FeatureSet_VisibilityFeature{}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *FeatureSet_VisibilityFeature) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSet_VisibilityFeature) ProtoMessage() {}
+
+func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead.
+func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+}
+
 // A map from every known edition with a unique set of defaults to its
 // defaults. Not all editions may be contained here. For a given edition,
 // the defaults at the closest matching edition ordered at or before it should
@@ -4064,7 +4372,7 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct {
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
 	*x = FeatureSetDefaults_FeatureSetEditionDefault{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4076,7 +4384,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
 func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4212,7 +4520,7 @@ type SourceCodeInfo_Location struct {
 
 func (x *SourceCodeInfo_Location) Reset() {
 	*x = SourceCodeInfo_Location{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4224,7 +4532,7 @@ func (x *SourceCodeInfo_Location) String() string {
 func (*SourceCodeInfo_Location) ProtoMessage() {}
 
 func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4296,7 +4604,7 @@ type GeneratedCodeInfo_Annotation struct {
 
 func (x *GeneratedCodeInfo_Annotation) Reset() {
 	*x = GeneratedCodeInfo_Annotation{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4308,7 +4616,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string {
 func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4361,777 +4669,389 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio
 
 var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{
-	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
-	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
-	0x6c, 0x65,
0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, - 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, - 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28, - 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, - 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65, - 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, - 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, - 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, - 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, - 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, - 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, - 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, - 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, - 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02, - 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, - 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, - 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, - 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, - 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 
0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, - 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, - 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, - 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, - 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, - 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, - 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, - 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, - 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, - 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 
0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, - 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, - 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, - 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, - 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 
0x6c, 0x61, - 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, - 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, - 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, - 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, - 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, - 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, - 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, - 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x5f, 
0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, - 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, - 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, - 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, - 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, - 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, - 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, - 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, - 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 
0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, - 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, - 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, - 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, - 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, - 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, - 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, - 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 
0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, - 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, - 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, - 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, - 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, - 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, - 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, - 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 
0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, - 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, - 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, - 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, - 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, - 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, - 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, - 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 
0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10, - 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, - 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 
0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99, - 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, - 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, - 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 
0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, - 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, - 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, - 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, - 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, - 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, - 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 
0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, - 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, - 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12, - 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, - 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, - 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2, - 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, - 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, - 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, - 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, - 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, - 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, - 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73, - 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, - 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, - 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, - 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, - 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01, - 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 
0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, - 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, - 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, - 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, - 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, - 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, - 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, - 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, - 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, - 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, - 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 
0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, - 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, - 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08, - 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, - 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, - 0x0a, 0x03, 
0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, - 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, - 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, - 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7, - 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, - 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, - 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, - 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, - 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, - 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, - 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, - 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, - 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, - 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, - 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -}) +const file_google_protobuf_descriptor_proto_rawDesc = "" + + "\n" + + " 
google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" + + "\x11FileDescriptorSet\x128\n" + + "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" + + "\x13FileDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + + "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" + + "\n" + + "dependency\x18\x03 \x03(\tR\n" + + "dependency\x12+\n" + + "\x11public_dependency\x18\n" + + " \x03(\x05R\x10publicDependency\x12'\n" + + "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" + + "\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" + + "\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" + + "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" + + "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" + + "\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" + + "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" + + "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" + + "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" + + "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" + + "\x0fDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" + + "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" + + "\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" + + "\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" + + "nestedType\x12A\n" + + "\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" + + "\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" + + "\n" + + "oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" + + "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" + + "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" + + "\rreserved_name\x18\n" + + " \x03(\tR\freservedName\x12A\n" + + "\n" + + "visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" + + "visibility\x1az\n" + + "\x0eExtensionRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" + + "\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" + + "\rReservedRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" + + "\x15ExtensionRangeOptions\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" + + "\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" + + "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" + + "\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" + + "UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" + + "\vDeclaration\x12\x16\n" + + "\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" + + "\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" + + "\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" + + "\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" + + "\brepeated\x18\x06 
\x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" + + "\x11VerificationState\x12\x0f\n" + + "\vDECLARATION\x10\x00\x12\x0e\n" + + "\n" + + "UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" + + "\x14FieldDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" + + "\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" + + "\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" + + "\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" + + "\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" + + "\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" + + "\voneof_index\x18\t \x01(\x05R\n" + + "oneofIndex\x12\x1b\n" + + "\tjson_name\x18\n" + + " \x01(\tR\bjsonName\x127\n" + + "\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" + + "\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" + + "\x04Type\x12\x0f\n" + + "\vTYPE_DOUBLE\x10\x01\x12\x0e\n" + + "\n" + + "TYPE_FLOAT\x10\x02\x12\x0e\n" + + "\n" + + "TYPE_INT64\x10\x03\x12\x0f\n" + + "\vTYPE_UINT64\x10\x04\x12\x0e\n" + + "\n" + + "TYPE_INT32\x10\x05\x12\x10\n" + + "\fTYPE_FIXED64\x10\x06\x12\x10\n" + + "\fTYPE_FIXED32\x10\a\x12\r\n" + + "\tTYPE_BOOL\x10\b\x12\x0f\n" + + "\vTYPE_STRING\x10\t\x12\x0e\n" + + "\n" + + "TYPE_GROUP\x10\n" + + "\x12\x10\n" + + "\fTYPE_MESSAGE\x10\v\x12\x0e\n" + + "\n" + + "TYPE_BYTES\x10\f\x12\x0f\n" + + "\vTYPE_UINT32\x10\r\x12\r\n" + + "\tTYPE_ENUM\x10\x0e\x12\x11\n" + + "\rTYPE_SFIXED32\x10\x0f\x12\x11\n" + + "\rTYPE_SFIXED64\x10\x10\x12\x0f\n" + + "\vTYPE_SINT32\x10\x11\x12\x0f\n" + + "\vTYPE_SINT64\x10\x12\"C\n" + + "\x05Label\x12\x12\n" + + "\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" + + "\x0eLABEL_REPEATED\x10\x03\x12\x12\n" + + "\x0eLABEL_REQUIRED\x10\x02\"c\n" + + "\x14OneofDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x127\n" + + "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" + + "\x13EnumDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" + + "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" + + "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" + + "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" + + "\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" + + "\n" + + "visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" + + "visibility\x1a;\n" + + "\x11EnumReservedRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" + + "\x18EnumValueDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" + + "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" + + "\x16ServiceDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12>\n" + + "\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" + + "\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" + + "\x15MethodDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" + + "\n" + + "input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" + + "\voutput_type\x18\x03 \x01(\tR\n" + + "outputType\x128\n" + + "\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" + + 
"\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" + + "\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" + + "\vFileOptions\x12!\n" + + "\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" + + "\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" + + "\x13java_multiple_files\x18\n" + + " \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" + + "\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" + + "\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" + + "\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" + + "\n" + + "go_package\x18\v \x01(\tR\tgoPackage\x125\n" + + "\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" + + "\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" + + "\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" + + "\n" + + "deprecated\x18\x17 \x01(\b:\x05falseR\n" + + "deprecated\x12.\n" + + "\x10cc_enable_arenas\x18\x1f \x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" + + "\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" + + "\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" + + "\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" + + "\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" + + "\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" + + "\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" + + "\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" + + "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" + + "\fOptimizeMode\x12\t\n" + + "\x05SPEED\x10\x01\x12\r\n" + + "\tCODE_SIZE\x10\x02\x12\x10\n" + + "\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" + + "\x0eMessageOptions\x12<\n" + + "\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" + + "\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12\x1b\n" + + "\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" + + "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + + "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + + "\"\xa1\r\n" + + "\fFieldOptions\x12A\n" + + "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" + + "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" + + "\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" + + "\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" + + "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12\x1d\n" + + "\x04weak\x18\n" + + " \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" + + "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" + + "\tretention\x18\x11 
\x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" + + "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" + + "\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" + + "\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" + + "\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" + + "\x0eEditionDefault\x122\n" + + "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" + + "\x0eFeatureSupport\x12G\n" + + "\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" + + "\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" + + "\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" + + "\x0fedition_removed\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" + + "\x05CType\x12\n" + + "\n" + + "\x06STRING\x10\x00\x12\b\n" + + "\x04CORD\x10\x01\x12\x10\n" + + "\fSTRING_PIECE\x10\x02\"5\n" + + "\x06JSType\x12\r\n" + + "\tJS_NORMAL\x10\x00\x12\r\n" + + "\tJS_STRING\x10\x01\x12\r\n" + + "\tJS_NUMBER\x10\x02\"U\n" + + "\x0fOptionRetention\x12\x15\n" + + "\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" + + "\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" + + "\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" + + "\x10OptionTargetType\x12\x17\n" + + "\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" + + "\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" + + "\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" + + "\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" + + "\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" + + "\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" + + "\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" + + "\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" + + "\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" + + "\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" + + "\fOneofOptions\x127\n" + + "\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" + + "\vEnumOptions\x12\x1f\n" + + "\vallow_alias\x18\x02 \x01(\bR\n" + + "allowAlias\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12V\n" + + "&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + + "\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" + + "\x10EnumValueOptions\x12%\n" + + "\n" + + "deprecated\x18\x01 \x01(\b:\x05falseR\n" + + "deprecated\x127\n" + + "\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" + + "\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" + + "\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a 
\x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" + + "\x0eServiceOptions\x127\n" + + "\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" + + "\n" + + "deprecated\x18! \x01(\b:\x05falseR\n" + + "deprecated\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" + + "\rMethodOptions\x12%\n" + + "\n" + + "deprecated\x18! \x01(\b:\x05falseR\n" + + "deprecated\x12q\n" + + "\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" + + "\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" + + "\x10IdempotencyLevel\x12\x17\n" + + "\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" + + "\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" + + "\n" + + "IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" + + "\x13UninterpretedOption\x12A\n" + + "\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" + + "\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" + + "\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" + + "\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" + + "\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" + + "\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" + + "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" + + "\bNamePart\x12\x1b\n" + + "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" + + "\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" + + "\n" + + "FeatureSet\x12\x91\x01\n" + + "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" + + "\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" + + "\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" + + "\x0futf8_validation\x18\x04 \x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" + + "\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" + + "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" + + "\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" + + "jsonFormat\x12\xab\x01\n" + + "\x14enforce_naming_style\x18\a 
\x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" + + "\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" + + "EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" + + "\x11VisibilityFeature\"\x81\x01\n" + + "\x17DefaultSymbolVisibility\x12%\n" + + "!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" + + "\n" + + "EXPORT_ALL\x10\x01\x12\x14\n" + + "\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" + + "\tLOCAL_ALL\x10\x03\x12\n" + + "\n" + + "\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" + + "\rFieldPresence\x12\x1a\n" + + "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" + + "\bEXPLICIT\x10\x01\x12\f\n" + + "\bIMPLICIT\x10\x02\x12\x13\n" + + "\x0fLEGACY_REQUIRED\x10\x03\"7\n" + + "\bEnumType\x12\x15\n" + + "\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" + + "\x04OPEN\x10\x01\x12\n" + + "\n" + + "\x06CLOSED\x10\x02\"V\n" + + "\x15RepeatedFieldEncoding\x12#\n" + + "\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" + + "\n" + + "\x06PACKED\x10\x01\x12\f\n" + + "\bEXPANDED\x10\x02\"I\n" + + "\x0eUtf8Validation\x12\x1b\n" + + "\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" + + "\n" + + "\x06VERIFY\x10\x02\x12\b\n" + + "\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" + + "\x0fMessageEncoding\x12\x1c\n" + + "\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" + + "\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" + + "\tDELIMITED\x10\x02\"H\n" + + "\n" + + "JsonFormat\x12\x17\n" + + "\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" + + "\x05ALLOW\x10\x01\x12\x16\n" + + "\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" + + "\x12EnforceNamingStyle\x12 \n" + + "\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" + + "\tSTYLE2024\x10\x01\x12\x10\n" + + "\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" + + "\x12FeatureSetDefaults\x12X\n" + + "\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" + + "\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" + + "\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" + + "\x18FeatureSetEditionDefault\x122\n" + + "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" + + "\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" + + "\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" + + "\x0eSourceCodeInfo\x12D\n" + + "\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" + + "\bLocation\x12\x16\n" + + "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" + + "\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" + + "\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" + + "\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" + + "\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" + + "\x11GeneratedCodeInfo\x12M\n" + + "\n" + + 
"annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" + + "annotation\x1a\xeb\x01\n" + + "\n" + + "Annotation\x12\x16\n" + + "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" + + "\vsource_file\x18\x02 \x01(\tR\n" + + "sourceFile\x12\x14\n" + + "\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" + + "\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" + + "\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" + + "\bSemantic\x12\b\n" + + "\x04NONE\x10\x00\x12\a\n" + + "\x03SET\x10\x01\x12\t\n" + + "\x05ALIAS\x10\x02*\xa7\x02\n" + + "\aEdition\x12\x13\n" + + "\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" + + "\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" + + "\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" + + "\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" + + "\fEDITION_2023\x10\xe8\a\x12\x11\n" + + "\fEDITION_2024\x10\xe9\a\x12\x17\n" + + "\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" + + "\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" + + "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" + + "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" + + "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" + + "\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" + + "\x10SymbolVisibility\x12\x14\n" + + "\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" + + "\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" + + "\x11VISIBILITY_EXPORT\x10\x02B~\n" + + "\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection" var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once @@ -5145,143 +5065,151 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34) var file_google_protobuf_descriptor_proto_goTypes = []any{ - (Edition)(0), // 0: google.protobuf.Edition - (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel - (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence - (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType - (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding - (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation - (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding - (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat - (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: 
google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 27: google.protobuf.FileOptions - (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption - (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet - (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults - (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault - (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport - (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + (SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility + (ExtensionRangeOptions_VerificationState)(0), // 2: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 3: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 4: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 5: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 6: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 7: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 8: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 9: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 10: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 11: 
google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 12: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 14: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 15: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 16: google.protobuf.FeatureSet.JsonFormat + (FeatureSet_EnforceNamingStyle)(0), // 17: google.protobuf.FeatureSet.EnforceNamingStyle + (FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility + (GeneratedCodeInfo_Annotation_Semantic)(0), // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 20: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 21: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 22: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 23: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 24: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 25: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 26: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 27: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 28: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 29: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 30: google.protobuf.FileOptions + (*MessageOptions)(nil), // 31: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 32: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 33: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 34: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 35: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 36: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 37: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 38: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 39: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 40: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 41: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 42: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 43: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 44: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 45: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 47: google.protobuf.FieldOptions.EditionDefault + (*FieldOptions_FeatureSupport)(nil), // 48: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 49: google.protobuf.UninterpretedOption.NamePart + (*FeatureSet_VisibilityFeature)(nil), // 50: google.protobuf.FeatureSet.VisibilityFeature + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 52: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 53: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 18, // 0: 
google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition - 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet - 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 43, // 26: 
google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault - 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 59: 
google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 77, // [77:77] is the sub-list for method output_type - 77, // [77:77] is the sub-list for method input_type - 77, // [77:77] is the sub-list for extension type_name - 77, // [77:77] is the sub-list for extension extendee - 0, // [0:77] is the sub-list for field type_name + 24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 1, // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility + 38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 39, // 19: 
google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 2, // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 4, // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 3, // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 1, // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility + 35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 5, // 33: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 6, // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 7, // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 8, // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 9, // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 38, // 52: 
google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle + 18, // 66: google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility + 51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 81, // [81:81] is the sub-list for method output_type + 81, // [81:81] is the sub-list for method input_type + 81, // [81:81] is the sub-list for extension type_name + 81, // [81:81] is the sub-list for extension extendee + 0, // [0:81] is the sub-list for 
field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5294,8 +5222,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), - NumEnums: 17, - NumMessages: 33, + NumEnums: 20, + NumMessages: 34, NumExtensions: 0, NumServices: 0, }, diff --git a/openshift/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/openshift/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index 28d24bad79..37e712b6b7 100644 --- a/openshift/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/openshift/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -228,63 +228,29 @@ var ( var File_google_protobuf_go_features_proto protoreflect.FileDescriptor -var file_google_protobuf_go_features_proto_rawDesc = string([]byte{ - 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, - 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, - 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, - 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, - 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, - 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, - 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69, - 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f, - 0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2, - 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 
0x75, 0x6d, 0x5f, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, - 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, - 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, - 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, - 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, - 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a, - 0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49, - 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e, - 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44, - 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, - 0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, - 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52, - 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, - 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, - 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, - 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, - 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -}) +const file_google_protobuf_go_features_proto_rawDesc = "" + + "\n" + + "!google/protobuf/go_features.proto\x12\x02pb\x1a google/protobuf/descriptor.proto\"\xab\x05\n" + + "\n" + + "GoFeatures\x12\xbe\x01\n" + + "\x1alegacy_unmarshal_json_enum\x18\x01 \x01(\bB\x80\x01\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\t\x12\x04true\x18\x84\a\xa2\x01\n" + + "\x12\x05false\x18\xe7\a\xb2\x01[\b\xe8\a\x10\xe8\a\x1aSThe legacy UnmarshalJSON API is deprecated and will be removed in a future edition.R\x17legacyUnmarshalJsonEnum\x12t\n" + + "\tapi_level\x18\x02 \x01(\x0e2\x17.pb.GoFeatures.APILevelB>\x88\x01\x01\x98\x01\x03\x98\x01\x01\xa2\x01\x1a\x12\x15API_LEVEL_UNSPECIFIED\x18\x84\a\xa2\x01\x0f\x12\n" + + 
"API_OPAQUE\x18\xe9\a\xb2\x01\x03\b\xe8\aR\bapiLevel\x12|\n" + + "\x11strip_enum_prefix\x18\x03 \x01(\x0e2\x1e.pb.GoFeatures.StripEnumPrefixB0\x88\x01\x01\x98\x01\x06\x98\x01\a\x98\x01\x01\xa2\x01\x1b\x12\x16STRIP_ENUM_PREFIX_KEEP\x18\x84\a\xb2\x01\x03\b\xe9\aR\x0fstripEnumPrefix\"S\n" + + "\bAPILevel\x12\x19\n" + + "\x15API_LEVEL_UNSPECIFIED\x10\x00\x12\f\n" + + "\bAPI_OPEN\x10\x01\x12\x0e\n" + + "\n" + + "API_HYBRID\x10\x02\x12\x0e\n" + + "\n" + + "API_OPAQUE\x10\x03\"\x92\x01\n" + + "\x0fStripEnumPrefix\x12!\n" + + "\x1dSTRIP_ENUM_PREFIX_UNSPECIFIED\x10\x00\x12\x1a\n" + + "\x16STRIP_ENUM_PREFIX_KEEP\x10\x01\x12#\n" + + "\x1fSTRIP_ENUM_PREFIX_GENERATE_BOTH\x10\x02\x12\x1b\n" + + "\x17STRIP_ENUM_PREFIX_STRIP\x10\x03:<\n" + + "\x02go\x12\x1b.google.protobuf.FeatureSet\x18\xea\a \x01(\v2\x0e.pb.GoFeaturesR\x02goB/Z-google.golang.org/protobuf/types/gofeaturespb" var ( file_google_protobuf_go_features_proto_rawDescOnce sync.Once diff --git a/openshift/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/openshift/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 497da66e91..1ff0d1494d 100644 --- a/openshift/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/openshift/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -412,23 +412,13 @@ func (x *Any) GetValue() []byte { var File_google_protobuf_any_proto protoreflect.FileDescriptor -var file_google_protobuf_any_proto_rawDesc = string([]byte{ - 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, - 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, - 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, - 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_any_proto_rawDesc = "" + + "\n" + + "\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" + + "\x03Any\x12\x19\n" + + "\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05valueBv\n" + + "\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_any_proto_rawDescOnce sync.Once diff --git a/openshift/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/openshift/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 193880d181..ca2e7b38f4 100644 
--- a/openshift/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/openshift/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -289,24 +289,13 @@ func (x *Duration) GetNanos() int32 { var File_google_protobuf_duration_proto protoreflect.FileDescriptor -var file_google_protobuf_duration_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, - 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, - 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, - 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_duration_proto_rawDesc = "" + + "\n" + + "\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n" + + "\bDuration\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" + + "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n" + + "\x13com.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_duration_proto_rawDescOnce sync.Once diff --git a/openshift/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/openshift/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 00ac835c0b..06d584c14b 100644 --- a/openshift/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/openshift/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -298,24 +298,13 @@ func (x *Timestamp) GetNanos() int32 { var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor -var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{ - 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 
0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, - 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, - 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, - 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, - 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_timestamp_proto_rawDesc = "" + + "\n" + + "\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" + + "\tTimestamp\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" + + "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" + + "\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_timestamp_proto_rawDescOnce sync.Once diff --git a/openshift/vendor/k8s.io/client-go/util/cert/cert.go b/openshift/vendor/k8s.io/client-go/util/cert/cert.go index 91e171271a..4805d09ab5 100644 --- a/openshift/vendor/k8s.io/client-go/util/cert/cert.go +++ b/openshift/vendor/k8s.io/client-go/util/cert/cert.go @@ -75,13 +75,15 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro CommonName: cfg.CommonName, Organization: cfg.Organization, }, - DNSNames: []string{cfg.CommonName}, NotBefore: notBefore, NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, IsCA: true, } + if len(cfg.CommonName) > 0 { + tmpl.DNSNames = []string{cfg.CommonName} + } certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) if err != nil { diff --git a/openshift/vendor/modules.txt b/openshift/vendor/modules.txt index b6d9c9345d..8fcb2a11c5 100644 --- a/openshift/vendor/modules.txt +++ b/openshift/vendor/modules.txt @@ -1,3 +1,6 @@ +# github.com/Masterminds/semver/v3 v3.4.0 +## explicit; go 1.21 +github.com/Masterminds/semver/v3 # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile @@ -89,7 +92,7 @@ github.com/google/pprof/profile # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/gophercloud/gophercloud/v2 v2.7.0 +# github.com/gophercloud/gophercloud/v2 v2.9.0 ## explicit; go 1.22 github.com/gophercloud/gophercloud/v2 github.com/gophercloud/gophercloud/v2/openstack @@ -135,7 +138,7 @@ github.com/gophercloud/utils/v2/env github.com/gophercloud/utils/v2/gnocchi github.com/gophercloud/utils/v2/internal github.com/gophercloud/utils/v2/openstack/clientconfig -# github.com/hashicorp/go-version v1.7.0 +# github.com/hashicorp/go-version v1.8.0 ## explicit github.com/hashicorp/go-version # github.com/imdario/mergo v0.3.16 @@ -164,12 +167,13 @@ github.com/modern-go/reflect2 # 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg -# github.com/onsi/ginkgo/v2 v2.23.4 +# github.com/onsi/ginkgo/v2 v2.27.3 ## explicit; go 1.23.0 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config github.com/onsi/ginkgo/v2/formatter github.com/onsi/ginkgo/v2/ginkgo +github.com/onsi/ginkgo/v2/ginkgo/automaxprocs github.com/onsi/ginkgo/v2/ginkgo/build github.com/onsi/ginkgo/v2/ginkgo/command github.com/onsi/ginkgo/v2/ginkgo/generators @@ -183,10 +187,11 @@ github.com/onsi/ginkgo/v2/internal github.com/onsi/ginkgo/v2/internal/global github.com/onsi/ginkgo/v2/internal/interrupt_handler github.com/onsi/ginkgo/v2/internal/parallel_support +github.com/onsi/ginkgo/v2/internal/reporters github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.37.0 +# github.com/onsi/gomega v1.38.2 ## explicit; go 1.23.0 github.com/onsi/gomega github.com/onsi/gomega/format @@ -210,8 +215,8 @@ github.com/openshift/cluster-capi-operator/e2e/framework # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus/client_golang v1.22.0 -## explicit; go 1.22 +# github.com/prometheus/client_golang v1.23.2 +## explicit; go 1.23.0 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/prometheus @@ -219,32 +224,26 @@ github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/promhttp/internal -# github.com/prometheus/client_model v0.6.1 -## explicit; go 1.19 +# github.com/prometheus/client_model v0.6.2 +## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.62.0 -## explicit; go 1.21 +# github.com/prometheus/common v0.66.1 +## explicit; go 1.23.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model -# github.com/prometheus/procfs v0.15.1 -## explicit; go 1.20 +# github.com/prometheus/procfs v0.16.1 +## explicit; go 1.23.0 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/spf13/pflag v1.0.6 +# github.com/spf13/pflag v1.0.10 ## explicit; go 1.12 github.com/spf13/pflag # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 -# go.uber.org/automaxprocs v1.6.0 -## explicit; go 1.20 -go.uber.org/automaxprocs -go.uber.org/automaxprocs/internal/cgroups -go.uber.org/automaxprocs/internal/runtime -go.uber.org/automaxprocs/maxprocs -# go.uber.org/mock v0.5.2 -## explicit; go 1.23 +# go.uber.org/mock v0.6.0 +## explicit; go 1.23.0 go.uber.org/mock/gomock go.uber.org/mock/mockgen/model # go.uber.org/multierr v1.11.0 @@ -261,10 +260,19 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore +# go.yaml.in/yaml/v2 v2.4.2 +## explicit; go 1.15 +go.yaml.in/yaml/v2 +# go.yaml.in/yaml/v3 v3.0.4 +## explicit; go 1.16 +go.yaml.in/yaml/v3 # golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 ## explicit; go 1.20 golang.org/x/exp/maps -# golang.org/x/net v0.40.0 +# golang.org/x/mod v0.27.0 +## explicit; go 1.23.0 +golang.org/x/mod/semver +# golang.org/x/net v0.43.0 ## explicit; go 1.23.0 golang.org/x/net/html golang.org/x/net/html/atom @@ -274,19 +282,22 
@@ golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/httpcommon -# golang.org/x/oauth2 v0.24.0 -## explicit; go 1.18 +# golang.org/x/oauth2 v0.30.0 +## explicit; go 1.23.0 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sys v0.33.0 +# golang.org/x/sync v0.16.0 +## explicit; go 1.23.0 +golang.org/x/sync/errgroup +# golang.org/x/sys v0.35.0 ## explicit; go 1.23.0 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.32.0 +# golang.org/x/term v0.34.0 ## explicit; go 1.23.0 golang.org/x/term -# golang.org/x/text v0.26.0 +# golang.org/x/text v0.28.0 ## explicit; go 1.23.0 golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -311,16 +322,33 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.33.0 +# golang.org/x/tools v0.36.0 ## explicit; go 1.23.0 golang.org/x/tools/cover +golang.org/x/tools/go/ast/edge golang.org/x/tools/go/ast/inspector -golang.org/x/tools/internal/astutil/edge +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/packages +golang.org/x/tools/go/types/objectpath +golang.org/x/tools/go/types/typeutil +golang.org/x/tools/internal/aliases +golang.org/x/tools/internal/event +golang.org/x/tools/internal/event/core +golang.org/x/tools/internal/event/keys +golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/gcimporter +golang.org/x/tools/internal/gocommand +golang.org/x/tools/internal/packagesinternal +golang.org/x/tools/internal/pkgbits +golang.org/x/tools/internal/stdlib +golang.org/x/tools/internal/typeparams +golang.org/x/tools/internal/typesinternal +golang.org/x/tools/internal/versions # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/protobuf v1.36.5 -## explicit; go 1.21 +# google.golang.org/protobuf v1.36.8 +## explicit; go 1.23 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -365,7 +393,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.31.9 +# k8s.io/api v0.31.14 ## explicit; go 1.22.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -424,11 +452,11 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.31.9 +# k8s.io/apiextensions-apiserver v0.31.14 ## explicit; go 1.22.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 -# k8s.io/apimachinery v0.31.9 +# k8s.io/apimachinery v0.31.14 ## explicit; go 1.22.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -482,7 +510,7 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.31.9 +# k8s.io/client-go v0.31.14 ## explicit; go 1.22.0 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -782,7 +810,7 @@ k8s.io/utils/pointer k8s.io/utils/ptr k8s.io/utils/strings/slices k8s.io/utils/trace -# sigs.k8s.io/cluster-api v1.9.8 +# sigs.k8s.io/cluster-api v1.9.11 ## explicit; go 1.22.0 sigs.k8s.io/cluster-api/api/v1beta1 sigs.k8s.io/cluster-api/errors @@ -858,8 +886,8 @@ sigs.k8s.io/structured-merge-diff/v4/merge 
sigs.k8s.io/structured-merge-diff/v4/schema sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value -# sigs.k8s.io/yaml v1.4.0 -## explicit; go 1.12 +# sigs.k8s.io/yaml v1.6.0 +## explicit; go 1.22 sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 # sigs.k8s.io/cluster-api-provider-openstack => ../ diff --git a/openshift/vendor/sigs.k8s.io/yaml/.travis.yml b/openshift/vendor/sigs.k8s.io/yaml/.travis.yml deleted file mode 100644 index 54ed8f9cb9..0000000000 --- a/openshift/vendor/sigs.k8s.io/yaml/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -arch: arm64 -dist: focal -go: 1.15.x -script: - - diff -u <(echo -n) <(gofmt -d *.go) - - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON) - - GO111MODULE=on go vet . - - GO111MODULE=on go test -v -race ./... - - git diff --exit-code -install: - - GO111MODULE=off go get golang.org/x/lint/golint diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS deleted file mode 100644 index 73be0a3a9b..0000000000 --- a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS +++ /dev/null @@ -1,24 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- dims -- jpbetz -- smarterclayton -- deads2k -- sttts -- liggitt -- natasha41575 -- knverey -reviewers: -- dims -- thockin -- jpbetz -- smarterclayton -- deads2k -- derekwaynecarr -- mikedanese -- liggitt -- sttts -- tallclair -labels: -- sig/api-machinery diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md index 53f4139dc3..9a8f1e6782 100644 --- a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md +++ b/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md @@ -1,143 +1,71 @@ -# go-yaml fork +# goyaml.v2 -This package is a fork of the go-yaml library and is intended solely for consumption -by kubernetes projects. In this fork, we plan to support only critical changes required for -kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests -should be made in the upstream go-yaml library, and we will reject such changes in this fork -unless we are pulling them from upstream. +This package provides type and function aliases for the `go.yaml.in/yaml/v2` package (which is compatible with `gopkg.in/yaml.v2`). -This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0 +## Purpose -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. 
- -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` +The purpose of this package is to: -This example will generate the following output: +1. Provide a transition path for users migrating from the sigs.k8s.io/yaml package to direct usage of go.yaml.in/yaml/v2 +2. Maintain compatibility with existing code while encouraging migration to the upstream package +3. Reduce maintenance overhead by delegating to the upstream implementation +## Usage + +Instead of importing this package directly, you should migrate to using `go.yaml.in/yaml/v2` directly: + +```go +// Old way +import "sigs.k8s.io/yaml/goyaml.v2" + +// Recommended way +import "go.yaml.in/yaml/v2" ``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` +## Available Types and Functions + +All public types and functions from `go.yaml.in/yaml/v2` are available through this package: + +### Types + +- `MapSlice` - Encodes and decodes as a YAML map with preserved key order +- `MapItem` - An item in a MapSlice +- `Unmarshaler` - Interface for custom unmarshaling behavior +- `Marshaler` - Interface for custom marshaling behavior +- `IsZeroer` - Interface to check if an object is zero +- `Decoder` - Reads and decodes YAML values from an input stream +- `Encoder` - Writes YAML values to an output stream +- `TypeError` - Error returned by Unmarshal for decoding issues + +### Functions + +- `Unmarshal` - Decodes YAML data into a Go value +- `UnmarshalStrict` - Like Unmarshal but errors on unknown fields +- `Marshal` - Serializes a Go value into YAML +- `NewDecoder` - Creates a new Decoder +- `NewEncoder` - Creates a new Encoder +- `FutureLineWrap` - Controls line wrapping behavior + +## Migration Guide + +To migrate from this package to `go.yaml.in/yaml/v2`: + +1. Update your import statements: + ```go + // From + import "sigs.k8s.io/yaml/goyaml.v2" + + // To + import "go.yaml.in/yaml/v2" + ``` + +2. No code changes should be necessary as the API is identical + +3. 
Update your go.mod file to include the dependency: + ``` + require go.yaml.in/yaml/v2 v2.4.2 + ``` + +## Deprecation Notice + +All types and functions in this package are marked as deprecated. You should migrate to using `go.yaml.in/yaml/v2` directly. diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go b/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go new file mode 100644 index 0000000000..8c82bc2cb9 --- /dev/null +++ b/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go @@ -0,0 +1,85 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +import ( + gopkg_yaml "go.yaml.in/yaml/v2" +) + +// Type aliases for public types from go.yaml.in/yaml/v2 +type ( + // MapSlice encodes and decodes as a YAML map. + // The order of keys is preserved when encoding and decoding. + // Deprecated: Use go.yaml.in/yaml/v2.MapSlice directly. + MapSlice = gopkg_yaml.MapSlice + + // MapItem is an item in a MapSlice. + // Deprecated: Use go.yaml.in/yaml/v2.MapItem directly. + MapItem = gopkg_yaml.MapItem + + // Unmarshaler is implemented by types to customize their behavior when being unmarshaled from a YAML document. + // Deprecated: Use go.yaml.in/yaml/v2.Unmarshaler directly. + Unmarshaler = gopkg_yaml.Unmarshaler + + // Marshaler is implemented by types to customize their behavior when being marshaled into a YAML document. + // Deprecated: Use go.yaml.in/yaml/v2.Marshaler directly. + Marshaler = gopkg_yaml.Marshaler + + // IsZeroer is used to check whether an object is zero to determine whether it should be omitted when + // marshaling with the omitempty flag. One notable implementation is time.Time. + // Deprecated: Use go.yaml.in/yaml/v2.IsZeroer directly. + IsZeroer = gopkg_yaml.IsZeroer + + // Decoder reads and decodes YAML values from an input stream. + // Deprecated: Use go.yaml.in/yaml/v2.Decoder directly. + Decoder = gopkg_yaml.Decoder + + // Encoder writes YAML values to an output stream. + // Deprecated: Use go.yaml.in/yaml/v2.Encoder directly. + Encoder = gopkg_yaml.Encoder + + // TypeError is returned by Unmarshal when one or more fields in the YAML document cannot be properly decoded. + // Deprecated: Use go.yaml.in/yaml/v2.TypeError directly. + TypeError = gopkg_yaml.TypeError +) + +// Function aliases for public functions from go.yaml.in/yaml/v2 +var ( + // Unmarshal decodes the first document found within the in byte slice and assigns decoded values into the out value. + // Deprecated: Use go.yaml.in/yaml/v2.Unmarshal directly. + Unmarshal = gopkg_yaml.Unmarshal + + // UnmarshalStrict is like Unmarshal except that any fields that are found in the data that do not have corresponding struct members will result in an error. + // Deprecated: Use go.yaml.in/yaml/v2.UnmarshalStrict directly. + UnmarshalStrict = gopkg_yaml.UnmarshalStrict + + // Marshal serializes the value provided into a YAML document. + // Deprecated: Use go.yaml.in/yaml/v2.Marshal directly. 
+ Marshal = gopkg_yaml.Marshal + + // NewDecoder returns a new decoder that reads from r. + // Deprecated: Use go.yaml.in/yaml/v2.NewDecoder directly. + NewDecoder = gopkg_yaml.NewDecoder + + // NewEncoder returns a new encoder that writes to w. + // Deprecated: Use go.yaml.in/yaml/v2.NewEncoder directly. + NewEncoder = gopkg_yaml.NewEncoder + + // FutureLineWrap globally disables line wrapping when encoding long strings. + // Deprecated: Use go.yaml.in/yaml/v2.FutureLineWrap directly. + FutureLineWrap = gopkg_yaml.FutureLineWrap +) diff --git a/openshift/vendor/sigs.k8s.io/yaml/yaml.go b/openshift/vendor/sigs.k8s.io/yaml/yaml.go index fc10246bdb..aa01acd45d 100644 --- a/openshift/vendor/sigs.k8s.io/yaml/yaml.go +++ b/openshift/vendor/sigs.k8s.io/yaml/yaml.go @@ -24,7 +24,7 @@ import ( "reflect" "strconv" - "sigs.k8s.io/yaml/goyaml.v2" + "go.yaml.in/yaml/v2" ) // Marshal marshals obj into JSON using stdlib json.Marshal, and then converts JSON to YAML using JSONToYAML (see that method for more reference) @@ -92,7 +92,7 @@ func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error { d = opt(d) } if err := d.Decode(&obj); err != nil { - return fmt.Errorf("while decoding JSON: %v", err) + return fmt.Errorf("while decoding JSON: %w", err) } return nil } @@ -417,3 +417,10 @@ func jsonToYAMLValue(j interface{}) interface{} { } return j } + +// DisallowUnknownFields configures the JSON decoder to error out if unknown +// fields come along, instead of dropping them by default. +func DisallowUnknownFields(d *json.Decoder) *json.Decoder { + d.DisallowUnknownFields() + return d +} diff --git a/pkg/cloud/services/networking/network.go b/pkg/cloud/services/networking/network.go index 2dbafc3eb4..4483c37829 100644 --- a/pkg/cloud/services/networking/network.go +++ b/pkg/cloud/services/networking/network.go @@ -25,6 +25,7 @@ import ( "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/external" "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks" "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/utils/ptr" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" @@ -201,6 +202,10 @@ func (s *Service) ReconcileSubnet(openStackCluster *infrav1.OpenStackCluster, cl } else if len(subnetList) == 1 { subnet = &subnetList[0] s.scope.Logger().V(6).Info("Reusing existing subnet", "name", subnet.Name, "id", subnet.ID) + + if err := s.updateSubnetDNSNameservers(openStackCluster, subnet); err != nil { + return err + } } openStackCluster.Status.Network.Subnets = []infrav1.Subnet{ @@ -248,6 +253,39 @@ func (s *Service) createSubnet(openStackCluster *infrav1.OpenStackCluster, clust return subnet, nil } +// updateSubnetDNSNameservers updates the DNS nameservers for an existing subnet if they differ from the desired configuration. 
+func (s *Service) updateSubnetDNSNameservers(openStackCluster *infrav1.OpenStackCluster, subnet *subnets.Subnet) error { + // Picking the first managed subnet since we only support one for now + desiredNameservers := openStackCluster.Spec.ManagedSubnets[0].DNSNameservers + currentNameservers := subnet.DNSNameservers + + var needsUpdate bool + if len(desiredNameservers) != len(currentNameservers) { + needsUpdate = true + } else { + needsUpdate = !equality.Semantic.DeepEqual(currentNameservers, desiredNameservers) + } + + if needsUpdate { + s.scope.Logger().Info("Updating subnet DNS nameservers", "id", subnet.ID, "from", currentNameservers, "to", desiredNameservers) + + updateOpts := subnets.UpdateOpts{ + DNSNameservers: &desiredNameservers, + } + + updatedSubnet, err := s.client.UpdateSubnet(subnet.ID, updateOpts) + if err != nil { + record.Warnf(openStackCluster, "FailedUpdateSubnet", "Failed to update DNS nameservers for subnet %s: %v", subnet.ID, err) + return err + } + + *subnet = *updatedSubnet + record.Eventf(openStackCluster, "SuccessfulUpdateSubnet", "Updated DNS nameservers for subnet %s", subnet.ID) + } + + return nil +} + func (s *Service) getNetworkByName(networkName string) (networks.Network, error) { opts := networks.ListOpts{ Name: networkName, diff --git a/pkg/cloud/services/networking/network_test.go b/pkg/cloud/services/networking/network_test.go index 200ffc30d8..01f86191c7 100644 --- a/pkg/cloud/services/networking/network_test.go +++ b/pkg/cloud/services/networking/network_test.go @@ -39,6 +39,99 @@ const ( clusterResourceName = "test-cluster" ) +func Test_updateSubnetDNSNameservers(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + const subnetID = "subnet-123" + + tests := []struct { + name string + currentNameservers []string + desiredNameservers []string + expect func(m *mock.MockNetworkClientMockRecorder) + }{ + { + name: "no changes needed", + currentNameservers: []string{"8.8.8.8", "8.8.4.4"}, + desiredNameservers: []string{"8.8.8.8", "8.8.4.4"}, + expect: func(*mock.MockNetworkClientMockRecorder) {}, + }, + { + name: "different nameservers", + currentNameservers: []string{"8.8.8.8", "8.8.4.4"}, + desiredNameservers: []string{"1.1.1.1", "1.0.0.1"}, + expect: func(m *mock.MockNetworkClientMockRecorder) { + m.UpdateSubnet(subnetID, subnets.UpdateOpts{ + DNSNameservers: &[]string{"1.1.1.1", "1.0.0.1"}, + }).Return(&subnets.Subnet{ + ID: subnetID, + DNSNameservers: []string{"1.1.1.1", "1.0.0.1"}, + }, nil) + }, + }, + { + name: "different count", + currentNameservers: []string{"8.8.8.8"}, + desiredNameservers: []string{"8.8.8.8", "8.8.4.4"}, + expect: func(m *mock.MockNetworkClientMockRecorder) { + m.UpdateSubnet(subnetID, subnets.UpdateOpts{ + DNSNameservers: &[]string{"8.8.8.8", "8.8.4.4"}, + }).Return(&subnets.Subnet{ + ID: subnetID, + DNSNameservers: []string{"8.8.8.8", "8.8.4.4"}, + }, nil) + }, + }, + { + name: "same nameservers but different order", + currentNameservers: []string{"8.8.8.8", "8.8.4.4"}, + desiredNameservers: []string{"8.8.4.4", "8.8.8.8"}, + expect: func(m *mock.MockNetworkClientMockRecorder) { + m.UpdateSubnet(subnetID, subnets.UpdateOpts{ + DNSNameservers: &[]string{"8.8.4.4", "8.8.8.8"}, + }).Return(&subnets.Subnet{ + ID: subnetID, + DNSNameservers: []string{"8.8.4.4", "8.8.8.8"}, + }, nil) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + mockClient := mock.NewMockNetworkClient(mockCtrl) + tt.expect(mockClient.EXPECT()) + + cluster := 
&infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + ManagedSubnets: []infrav1.SubnetSpec{ + { + DNSNameservers: tt.desiredNameservers, + }, + }, + }, + } + subnet := &subnets.Subnet{ + ID: subnetID, + DNSNameservers: tt.currentNameservers, + } + + scopeFactory := scope.NewMockScopeFactory(mockCtrl, "") + log := testr.New(t) + s := Service{ + client: mockClient, + scope: scope.NewWithLogger(scopeFactory, log), + } + + err := s.updateSubnetDNSNameservers(cluster, subnet) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(subnet.DNSNameservers).To(Equal(tt.desiredNameservers)) + }) + } +} + func Test_ReconcileNetwork(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() @@ -460,7 +553,8 @@ func Test_ReconcileSubnet(t *testing.T) { fakeSubnetID := "d08803fc-2fa5-4179-b9d7-8c43d0af2fe6" fakeCIDR := "10.0.0.0/24" fakeNetworkID := "d08803fc-2fa5-4279-b9f7-8c45d0ff2fe6" - fakeDNS := "10.0.10.200" + fakeDNS1 := "10.0.10.200" + fakeDNS2 := "10.0.10.201" tests := []struct { name string @@ -571,7 +665,7 @@ func Test_ReconcileSubnet(t *testing.T) { ManagedSubnets: []infrav1.SubnetSpec{ { CIDR: fakeCIDR, - DNSNameservers: []string{fakeDNS}, + DNSNameservers: []string{fakeDNS1}, }, }, }, @@ -595,7 +689,7 @@ func Test_ReconcileSubnet(t *testing.T) { IPVersion: 4, CIDR: fakeCIDR, Description: expectedSubnetDesc, - DNSNameservers: []string{fakeDNS}, + DNSNameservers: []string{fakeDNS1}, }). Return(&subnets.Subnet{ ID: fakeSubnetID, @@ -690,6 +784,288 @@ func Test_ReconcileSubnet(t *testing.T) { }, }, }, + { + name: "existing subnet with different DNS nameservers - update needed", + openStackCluster: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS1}, + }, + }, + }, + Status: infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: fakeNetworkID, + }, + }, + }, + }, + expect: func(m *mock.MockNetworkClientMockRecorder) { + m. + ListSubnet(subnets.ListOpts{NetworkID: fakeNetworkID, CIDR: fakeCIDR}). + Return([]subnets.Subnet{ + { + ID: fakeSubnetID, + Name: expectedSubnetName, + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS2}, + }, + }, nil) + + updateOpts := subnets.UpdateOpts{ + DNSNameservers: &[]string{fakeDNS1}, + } + + m.UpdateSubnet(fakeSubnetID, updateOpts). + Return(&subnets.Subnet{ + ID: fakeSubnetID, + Name: expectedSubnetName, + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS1}, + }, nil). + Times(1) + }, + want: &infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: fakeNetworkID, + }, + Subnets: []infrav1.Subnet{ + { + Name: expectedSubnetName, + ID: fakeSubnetID, + CIDR: fakeCIDR, + }, + }, + }, + }, + }, + { + name: "existing subnet with same DNS nameservers - no update needed", + openStackCluster: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS1}, + }, + }, + }, + Status: infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: fakeNetworkID, + }, + }, + }, + }, + expect: func(m *mock.MockNetworkClientMockRecorder) { + m. + ListSubnet(subnets.ListOpts{NetworkID: fakeNetworkID, CIDR: fakeCIDR}). 
+ Return([]subnets.Subnet{ + { + ID: fakeSubnetID, + Name: expectedSubnetName, + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS1}, + }, + }, nil) + }, + want: &infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: fakeNetworkID, + }, + Subnets: []infrav1.Subnet{ + { + Name: expectedSubnetName, + ID: fakeSubnetID, + CIDR: fakeCIDR, + }, + }, + }, + }, + }, + { + name: "existing subnet with multiple different DNS nameservers - update needed", + openStackCluster: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS1, fakeDNS2}, + }, + }, + }, + Status: infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: fakeNetworkID, + }, + }, + }, + }, + expect: func(m *mock.MockNetworkClientMockRecorder) { + m. + ListSubnet(subnets.ListOpts{NetworkID: fakeNetworkID, CIDR: fakeCIDR}). + Return([]subnets.Subnet{ + { + ID: fakeSubnetID, + Name: expectedSubnetName, + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS1}, + }, + }, nil) + + updateOpts := subnets.UpdateOpts{ + DNSNameservers: &[]string{fakeDNS1, fakeDNS2}, + } + + m.UpdateSubnet(fakeSubnetID, updateOpts). + Return(&subnets.Subnet{ + ID: fakeSubnetID, + Name: expectedSubnetName, + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS1, fakeDNS2}, + }, nil). + Times(1) + }, + want: &infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: fakeNetworkID, + }, + Subnets: []infrav1.Subnet{ + { + Name: expectedSubnetName, + ID: fakeSubnetID, + CIDR: fakeCIDR, + }, + }, + }, + }, + }, + { + name: "existing subnet with multiple (inverted) different DNS nameservers - update needed", + openStackCluster: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS2, fakeDNS1}, + }, + }, + }, + Status: infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: fakeNetworkID, + }, + }, + }, + }, + expect: func(m *mock.MockNetworkClientMockRecorder) { + m. + ListSubnet(subnets.ListOpts{NetworkID: fakeNetworkID, CIDR: fakeCIDR}). + Return([]subnets.Subnet{ + { + ID: fakeSubnetID, + Name: expectedSubnetName, + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS2}, + }, + }, nil) + + updateOpts := subnets.UpdateOpts{ + DNSNameservers: &[]string{fakeDNS2, fakeDNS1}, + } + + m.UpdateSubnet(fakeSubnetID, updateOpts). + Return(&subnets.Subnet{ + ID: fakeSubnetID, + Name: expectedSubnetName, + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS2, fakeDNS1}, + }, nil).
+ Times(1) + }, + want: &infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: fakeNetworkID, + }, + Subnets: []infrav1.Subnet{ + { + Name: expectedSubnetName, + ID: fakeSubnetID, + CIDR: fakeCIDR, + }, + }, + }, + }, + }, + { + name: "existing subnet with no DNS nameservers initially - add DNS nameservers", + openStackCluster: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS1}, + }, + }, + }, + Status: infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: fakeNetworkID, + }, + }, + }, + }, + expect: func(m *mock.MockNetworkClientMockRecorder) { + m. + ListSubnet(subnets.ListOpts{NetworkID: fakeNetworkID, CIDR: fakeCIDR}). + Return([]subnets.Subnet{ + { + ID: fakeSubnetID, + Name: expectedSubnetName, + CIDR: fakeCIDR, + DNSNameservers: []string{}, + }, + }, nil) + + updateOpts := subnets.UpdateOpts{ + DNSNameservers: &[]string{fakeDNS1}, + } + + m.UpdateSubnet(fakeSubnetID, updateOpts). + Return(&subnets.Subnet{ + ID: fakeSubnetID, + Name: expectedSubnetName, + CIDR: fakeCIDR, + DNSNameservers: []string{fakeDNS1}, + }, nil). + Times(1) + }, + want: &infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: fakeNetworkID, + }, + Subnets: []infrav1.Subnet{ + { + Name: expectedSubnetName, + ID: fakeSubnetID, + CIDR: fakeCIDR, + }, + }, + }, + }, + }, } for _, tt := range tests { diff --git a/pkg/cloud/services/networking/securitygroups_rules.go b/pkg/cloud/services/networking/securitygroups_rules.go index d74ff42185..110a89d441 100644 --- a/pkg/cloud/services/networking/securitygroups_rules.go +++ b/pkg/cloud/services/networking/securitygroups_rules.go @@ -258,20 +258,19 @@ func getSGWorkerAllowAll(remoteGroupIDSelf, secControlPlaneGroupID string) []res // Permit ports that defined in openStackCluster.Spec.APIServerLoadBalancer.AdditionalPorts. func getSGControlPlaneAdditionalPorts(ports []int) []resolvedSecurityGroupRuleSpec { controlPlaneRules := []resolvedSecurityGroupRuleSpec{} - - r := []resolvedSecurityGroupRuleSpec{ - { - Description: "Additional ports", - Direction: "ingress", - EtherType: "IPv4", - Protocol: "tcp", - }, - } + // Preallocate r with len(ports) + r := make([]resolvedSecurityGroupRuleSpec, len(ports)) for i, p := range ports { - r[i].PortRangeMin = p - r[i].PortRangeMax = p - controlPlaneRules = append(controlPlaneRules, r...) + r[i] = resolvedSecurityGroupRuleSpec{ + Description: "Additional port", + Direction: "ingress", + EtherType: "IPv4", + Protocol: "tcp", + PortRangeMin: p, + PortRangeMax: p, + } } + controlPlaneRules = append(controlPlaneRules, r...) 
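+	// e.g. ports = []int{80, 443} yields two ingress rules, one per port,
+	// each with PortRangeMin and PortRangeMax set to that port.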
return controlPlaneRules } diff --git a/pkg/cloud/services/networking/securitygroups_test.go b/pkg/cloud/services/networking/securitygroups_test.go index d304261931..6dc01b49c2 100644 --- a/pkg/cloud/services/networking/securitygroups_test.go +++ b/pkg/cloud/services/networking/securitygroups_test.go @@ -681,3 +681,62 @@ func TestService_ReconcileSecurityGroups(t *testing.T) { }) } } + +func TestGetSGControlPlaneAdditionalPorts(t *testing.T) { + tests := []struct { + name string + ports []int + want []resolvedSecurityGroupRuleSpec + }{ + { + name: "no ports", + ports: []int{}, + want: []resolvedSecurityGroupRuleSpec{}, + }, + { + name: "single port", + ports: []int{6443}, + want: []resolvedSecurityGroupRuleSpec{ + { + Description: "Additional port", + Direction: "ingress", + EtherType: "IPv4", + Protocol: "tcp", + PortRangeMin: 6443, + PortRangeMax: 6443, + }, + }, + }, + { + name: "multiple ports", + ports: []int{80, 443}, + want: []resolvedSecurityGroupRuleSpec{ + { + Description: "Additional port", + Direction: "ingress", + EtherType: "IPv4", + Protocol: "tcp", + PortRangeMin: 80, + PortRangeMax: 80, + }, + { + Description: "Additional port", + Direction: "ingress", + EtherType: "IPv4", + Protocol: "tcp", + PortRangeMin: 443, + PortRangeMax: 443, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := getSGControlPlaneAdditionalPorts(tt.ports) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("getSGControlPlaneAdditionalPorts() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/webhooks/openstackcluster_webhook.go b/pkg/webhooks/openstackcluster_webhook.go index 19a2571b2a..a237572a8e 100644 --- a/pkg/webhooks/openstackcluster_webhook.go +++ b/pkg/webhooks/openstackcluster_webhook.go @@ -127,7 +127,7 @@ func allowSubnetFilterToIDTransition(oldObj, newObj *infrav1.OpenStackCluster) b } // ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type. 
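+// Fields that may legally change (e.g. ManagedSubnets.DNSNameservers, AllowedCIDRs)
+// are cleared on both the old and new copies before the specs are compared, so only
+// immutable-field changes are reported; these added per-field checks push the function
+// past the default cyclomatic-complexity limits, hence the nolint directive below.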
-func (*openStackClusterWebhook) ValidateUpdate(_ context.Context, oldObjRaw, newObjRaw runtime.Object) (admission.Warnings, error) { +func (*openStackClusterWebhook) ValidateUpdate(_ context.Context, oldObjRaw, newObjRaw runtime.Object) (admission.Warnings, error) { //nolint:gocyclo,cyclop var allErrs field.ErrorList oldObj, err := castToOpenStackCluster(oldObjRaw) if err != nil { @@ -193,6 +193,40 @@ func (*openStackClusterWebhook) ValidateUpdate(_ context.Context, oldObjRaw, new newObj.Spec.ManagedSecurityGroups.AllowAllInClusterTraffic = false } + // Allow changes only to DNSNameservers in ManagedSubnets spec + if newObj.Spec.ManagedSubnets != nil && oldObj.Spec.ManagedSubnets != nil { + // Check if any fields other than DNSNameservers have changed + if len(oldObj.Spec.ManagedSubnets) != len(newObj.Spec.ManagedSubnets) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "managedSubnets"), "cannot add or remove subnets")) + } else { + // Build maps of subnets by CIDR + oldSubnetMap := make(map[string]*infrav1.SubnetSpec) + + for i := range oldObj.Spec.ManagedSubnets { + oldSubnet := &oldObj.Spec.ManagedSubnets[i] + oldSubnetMap[oldSubnet.CIDR] = oldSubnet + } + + // Check if all new subnets have matching old subnets with the same CIDR + for i := range newObj.Spec.ManagedSubnets { + newSubnet := &newObj.Spec.ManagedSubnets[i] + + oldSubnet, exists := oldSubnetMap[newSubnet.CIDR] + if !exists { + allErrs = append(allErrs, field.Forbidden( + field.NewPath("spec", "managedSubnets"), + fmt.Sprintf("cannot change subnet CIDR from existing value to %s", newSubnet.CIDR), + )) + continue + } + + // DNSNameservers is mutable + oldSubnet.DNSNameservers = nil + newSubnet.DNSNameservers = nil + } + } + } + // Allow changes on AllowedCIDRs if newObj.Spec.APIServerLoadBalancer != nil && oldObj.Spec.APIServerLoadBalancer != nil { oldObj.Spec.APIServerLoadBalancer.AllowedCIDRs = []string{} diff --git a/pkg/webhooks/openstackcluster_webhook_test.go b/pkg/webhooks/openstackcluster_webhook_test.go index 7c8b8526b5..7ff7bcdba1 100644 --- a/pkg/webhooks/openstackcluster_webhook_test.go +++ b/pkg/webhooks/openstackcluster_webhook_test.go @@ -902,6 +902,441 @@ func TestOpenStackCluster_ValidateUpdate(t *testing.T) { }, wantErr: true, }, + + { + name: "Changing OpenStackCluster.Spec.ManagedSubnets.DNSNameservers is allowed", + oldTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "8.8.8.8", + "8.8.4.4", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + }, + }, + }, + newTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "1.1.1.1", + "1.0.0.1", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "Adding new DNSNameserver to OpenStackCluster.Spec.ManagedSubnets.DNSNameservers is allowed", + oldTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: 
[]infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "8.8.8.8", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + }, + }, + }, + newTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "8.8.8.8", + "8.8.4.4", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "Removing DNSNameservers from OpenStackCluster.Spec.ManagedSubnets is allowed", + oldTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "8.8.8.8", + "8.8.4.4", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + }, + }, + }, + newTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{}, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "Multiple subnets with DNSNameservers changes are allowed", + oldTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "8.8.8.8", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + { + CIDR: "192.168.2.0/24", + DNSNameservers: []string{ + "8.8.4.4", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.2.10", + End: "192.168.2.100", + }, + }, + }, + }, + }, + }, + newTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "1.1.1.1", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + { + CIDR: "192.168.2.0/24", + DNSNameservers: []string{ + "1.0.0.1", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.2.10", + End: "192.168.2.100", + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "Changing CIDR in OpenStackCluster.Spec.ManagedSubnets is not allowed", + oldTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "8.8.8.8", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + }, + }, + }, + newTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: 
infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "10.0.0.0/24", + DNSNameservers: []string{ + "8.8.8.8", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "10.0.0.10", + End: "10.0.0.100", + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "Modifying AllocationPools in OpenStackCluster.Spec.ManagedSubnets is not allowed", + oldTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "8.8.8.8", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + }, + }, + }, + newTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "8.8.8.8", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.20", + End: "192.168.1.200", + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "Adding a new subnet to OpenStackCluster.Spec.ManagedSubnets is not allowed", + oldTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "8.8.8.8", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + }, + }, + }, + newTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "8.8.8.8", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + { + CIDR: "192.168.2.0/24", + DNSNameservers: []string{ + "8.8.8.8", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.2.10", + End: "192.168.2.100", + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "Removing a subnet from OpenStackCluster.Spec.ManagedSubnets is not allowed", + oldTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "8.8.8.8", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + { + CIDR: "192.168.2.0/24", + DNSNameservers: []string{ + "8.8.8.8", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.2.10", + End: "192.168.2.100", + }, + }, + }, + }, + }, + }, + newTemplate: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "foobar", + CloudName: "foobar", + }, + ManagedSubnets: []infrav1.SubnetSpec{ + { + CIDR: "192.168.1.0/24", + DNSNameservers: []string{ + "8.8.8.8", + }, + AllocationPools: []infrav1.AllocationPool{ + { + Start: "192.168.1.10", + End: "192.168.1.100", + }, + }, + }, + }, + 
}, + }, + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/templates/cluster-template-flatcar-sysext.yaml b/templates/cluster-template-flatcar-sysext.yaml index ec33bdeff2..a02289a09c 100644 --- a/templates/cluster-template-flatcar-sysext.yaml +++ b/templates/cluster-template-flatcar-sysext.yaml @@ -135,9 +135,6 @@ metadata: spec: kubeadmConfigSpec: clusterConfiguration: - apiServer: - extraArgs: - cloud-provider: external controllerManager: extraArgs: cloud-provider: external diff --git a/templates/cluster-template-flatcar.yaml b/templates/cluster-template-flatcar.yaml index 4282265943..bc48e49971 100644 --- a/templates/cluster-template-flatcar.yaml +++ b/templates/cluster-template-flatcar.yaml @@ -97,9 +97,6 @@ metadata: spec: kubeadmConfigSpec: clusterConfiguration: - apiServer: - extraArgs: - cloud-provider: external controllerManager: extraArgs: cloud-provider: external diff --git a/templates/cluster-template-without-lb.yaml b/templates/cluster-template-without-lb.yaml index 6dbf7a72d1..d6bf52a055 100644 --- a/templates/cluster-template-without-lb.yaml +++ b/templates/cluster-template-without-lb.yaml @@ -73,9 +73,6 @@ metadata: spec: kubeadmConfigSpec: clusterConfiguration: - apiServer: - extraArgs: - cloud-provider: external controllerManager: extraArgs: cloud-provider: external diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml index 3508d37e26..d419a7c3f8 100644 --- a/templates/cluster-template.yaml +++ b/templates/cluster-template.yaml @@ -73,9 +73,6 @@ metadata: spec: kubeadmConfigSpec: clusterConfiguration: - apiServer: - extraArgs: - cloud-provider: external controllerManager: extraArgs: cloud-provider: external diff --git a/templates/clusterclass-dev-test.yaml b/templates/clusterclass-dev-test.yaml index e4181b24ec..345eb80ef1 100644 --- a/templates/clusterclass-dev-test.yaml +++ b/templates/clusterclass-dev-test.yaml @@ -308,9 +308,6 @@ spec: spec: kubeadmConfigSpec: clusterConfiguration: - apiServer: - extraArgs: - cloud-provider: external controllerManager: extraArgs: cloud-provider: external diff --git a/templates/image-template-bastion.yaml b/templates/image-template-bastion.yaml new file mode 100644 index 0000000000..d333ea0415 --- /dev/null +++ b/templates/image-template-bastion.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: openstack.k-orc.cloud/v1alpha1 +kind: Image +metadata: + name: bastion-image +spec: + managementPolicy: managed + resource: + name: ubuntu-24.04 + content: + diskFormat: qcow2 + download: + url: ${BASTION_IMAGE_URL:="https://cloud-images.ubuntu.com/releases/noble/release/ubuntu-24.04-server-cloudimg-amd64.img"} + cloudCredentialsRef: + secretName: ${CLOUD_CONFIG_SECRET:=dev-test-cloud-config} + cloudName: ${OPENSTACK_CLOUD:=capo-e2e} diff --git a/templates/image-template-node.yaml b/templates/image-template-node.yaml new file mode 100644 index 0000000000..6f87d2bb60 --- /dev/null +++ b/templates/image-template-node.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: openstack.k-orc.cloud/v1alpha1 +kind: Image +metadata: + name: node-image +spec: + managementPolicy: managed + resource: + name: flatcar_production + content: + diskFormat: qcow2 + download: + url: ${NODE_IMAGE_URL:="https://stable.release.flatcar-linux.net/amd64-usr/current/flatcar_production_openstack_image.img"} + cloudCredentialsRef: + secretName: ${CLOUD_CONFIG_SECRET:=dev-test-cloud-config} + cloudName: ${OPENSTACK_CLOUD:=capo-e2e} diff --git a/templates/images-template.yaml b/templates/images-template.yaml deleted file mode 
100644 index 2d0b9ae7d4..0000000000 --- a/templates/images-template.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -apiVersion: openstack.k-orc.cloud/v1alpha1 -kind: Image -metadata: - name: node-image -spec: - managementPolicy: managed - resource: - name: flatcar_production - content: - diskFormat: qcow2 - download: - url: ${NODE_IMAGE_URL:="https://stable.release.flatcar-linux.net/amd64-usr/current/flatcar_production_openstack_image.img"} - cloudCredentialsRef: - secretName: dev-test-cloud-config - cloudName: capo-e2e ---- -apiVersion: openstack.k-orc.cloud/v1alpha1 -kind: Image -metadata: - name: bastion-image -spec: - managementPolicy: managed - resource: - name: ubuntu-22.04 - content: - diskFormat: qcow2 - download: - url: ${BASTION_IMAGE_URL:="https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img"} - cloudCredentialsRef: - secretName: dev-test-cloud-config - cloudName: capo-e2e diff --git a/test/e2e/data/cni/calico.yaml b/test/e2e/data/cni/calico.yaml index ce62a65fd0..e983e61f11 100644 --- a/test/e2e/data/cni/calico.yaml +++ b/test/e2e/data/cni/calico.yaml @@ -1,3 +1,41 @@ +# This is taken from https://github.com/projectcalico/calico/blob/v3.30.2/manifests/calico.yaml + +# Source: calico/templates/calico-kube-controllers.yaml +# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers +--- +# Source: calico/templates/calico-kube-controllers.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-node.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system +--- +# Source: calico/templates/calico-node.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-cni-plugin + namespace: kube-system --- # Source: calico/templates/calico-config.yaml # This ConfigMap is used to configure a self-hosted Calico installation. @@ -45,20 +83,16 @@ data: "type": "portmap", "snat": true, "capabilities": {"portMappings": true} - }, - { - "type": "bandwidth", - "capabilities": {"bandwidth": true} } ] } - --- # Source: calico/templates/kdd-crds.yaml - apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: bgpconfigurations.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -67,6 +101,7 @@ spec: listKind: BGPConfigurationList plural: bgpconfigurations singular: bgpconfiguration + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -75,14 +110,19 @@ spec: description: BGPConfiguration contains the configuration for any BGP routing. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -90,135 +130,140 @@ spec: description: BGPConfigurationSpec contains the values of the BGP configuration. properties: asNumber: - description: 'ASNumber is the default AS number used by a node. [Default: - 64512]' + description: 'ASNumber is the default AS number used by a node. [Default: 64512]' format: int32 type: integer bindMode: - description: BindMode indicates whether to listen for BGP connections - on all addresses (None) or only on the node's canonical IP address - Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen - for BGP connections on all addresses. + description: |- + BindMode indicates whether to listen for BGP connections on all addresses (None) + or only on the node's canonical IP address Node.Spec.BGP.IPvXAddress (NodeIP). + Default behaviour is to listen for BGP connections on all addresses. type: string communities: - description: Communities is a list of BGP community values and their - arbitrary names for tagging routes. + description: Communities is a list of BGP community values and their arbitrary names for tagging routes. items: - description: Community contains standard or large community value - and its name. + description: Community contains standard or large community value and its name. properties: name: description: Name given to community value. type: string value: - description: Value must be of format `aa:nn` or `aa:nn:mm`. - For standard community use `aa:nn` format, where `aa` and - `nn` are 16 bit number. For large community use `aa:nn:mm` - format, where `aa`, `nn` and `mm` are 32 bit number. Where, - `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + description: |- + Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and `nn` are 16 bit number. + For large community use `aa:nn:mm` format, where `aa`, `nn` and `mm` are 32 bit number. + Where, `aa` is an AS Number, `nn` and `mm` are per-AS identifier. pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ type: string type: object type: array + ignoredInterfaces: + description: IgnoredInterfaces indicates the network interfaces that needs to be excluded when reading device routes. + items: + type: string + type: array listenPort: - description: ListenPort is the port where BGP protocol should listen. - Defaults to 179 + description: ListenPort is the port where BGP protocol should listen. Defaults to 179 maximum: 65535 minimum: 1 type: integer + localWorkloadPeeringIPV4: + description: |- + The virtual IPv4 address of the node with which its local workload is expected to peer. + It is recommended to use a link-local address. 
+ type: string + localWorkloadPeeringIPV6: + description: |- + The virtual IPv6 address of the node with which its local workload is expected to peer. + It is recommended to use a link-local address. + type: string logSeverityScreen: - description: 'LogSeverityScreen is the log severity above which logs - are sent to the stdout. [Default: INFO]' + description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: INFO]' type: string nodeMeshMaxRestartTime: - description: Time to allow for software restart for node-to-mesh peerings. When - specified, this is configured as the graceful restart timeout. When - not specified, the BIRD default of 120s is used. This field can - only be set on the default BGPConfiguration instance and requires - that NodeMesh is enabled + description: |- + Time to allow for software restart for node-to-mesh peerings. When specified, this is configured + as the graceful restart timeout. When not specified, the BIRD default of 120s is used. + This field can only be set on the default BGPConfiguration instance and requires that NodeMesh is enabled type: string nodeMeshPassword: - description: Optional BGP password for full node-to-mesh peerings. - This field can only be set on the default BGPConfiguration instance - and requires that NodeMesh is enabled + description: |- + Optional BGP password for full node-to-mesh peerings. + This field can only be set on the default BGPConfiguration instance and requires that NodeMesh is enabled properties: secretKeyRef: description: Selects a key of a secret in the node pod's namespace. properties: key: - description: The key of the secret to select from. Must be - a valid secret key. + description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: - description: Specify whether the Secret or its key must be - defined + description: Specify whether the Secret or its key must be defined type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nodeToNodeMeshEnabled: - description: 'NodeToNodeMeshEnabled sets whether full node to node - BGP mesh is enabled. [Default: true]' + description: 'NodeToNodeMeshEnabled sets whether full node to node BGP mesh is enabled. [Default: true]' type: boolean prefixAdvertisements: - description: PrefixAdvertisements contains per-prefix advertisement - configuration. + description: PrefixAdvertisements contains per-prefix advertisement configuration. items: - description: PrefixAdvertisement configures advertisement properties - for the specified CIDR. + description: PrefixAdvertisement configures advertisement properties for the specified CIDR. properties: cidr: description: CIDR for which properties should be advertised. type: string communities: - description: Communities can be list of either community names - already defined in `Specs.Communities` or community value - of format `aa:nn` or `aa:nn:mm`. 
For standard community use - `aa:nn` format, where `aa` and `nn` are 16 bit number. For - large community use `aa:nn:mm` format, where `aa`, `nn` and - `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and - `mm` are per-AS identifier. + description: |- + Communities can be list of either community names already defined in `Specs.Communities` or community value of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and `nn` are 16 bit number. + For large community use `aa:nn:mm` format, where `aa`, `nn` and `mm` are 32 bit number. + Where,`aa` is an AS Number, `nn` and `mm` are per-AS identifier. items: type: string type: array type: object type: array serviceClusterIPs: - description: ServiceClusterIPs are the CIDR blocks from which service - cluster IPs are allocated. If specified, Calico will advertise these - blocks, as well as any cluster IPs within them. + description: |- + ServiceClusterIPs are the CIDR blocks from which service cluster IPs are allocated. + If specified, Calico will advertise these blocks, as well as any cluster IPs within them. items: - description: ServiceClusterIPBlock represents a single allowed ClusterIP - CIDR block. + description: ServiceClusterIPBlock represents a single allowed ClusterIP CIDR block. properties: cidr: type: string type: object type: array serviceExternalIPs: - description: ServiceExternalIPs are the CIDR blocks for Kubernetes - Service External IPs. Kubernetes Service ExternalIPs will only be - advertised if they are within one of these blocks. + description: |- + ServiceExternalIPs are the CIDR blocks for Kubernetes Service External IPs. + Kubernetes Service ExternalIPs will only be advertised if they are within one of these blocks. items: - description: ServiceExternalIPBlock represents a single allowed - External IP CIDR block. + description: ServiceExternalIPBlock represents a single allowed External IP CIDR block. properties: cidr: type: string type: object type: array serviceLoadBalancerIPs: - description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes - Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress - IPs will only be advertised if they are within one of these blocks. + description: |- + ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes Service LoadBalancer IPs. + Kubernetes Service status.LoadBalancer.Ingress IPs will only be advertised if they are within one of these blocks. items: - description: ServiceLoadBalancerIPBlock represents a single allowed - LoadBalancer IP CIDR block. + description: ServiceLoadBalancerIPBlock represents a single allowed LoadBalancer IP CIDR block. properties: cidr: type: string @@ -228,17 +273,187 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: bgpfilters.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPFilter + listKind: BGPFilterList + plural: bgpfilters + singular: bgpfilter + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BGPFilterSpec contains the IPv4 and IPv6 filter rules of the BGP Filter. + properties: + exportV4: + description: The ordered set of IPv4 BGPFilter rules acting on exporting routes to a peer. + items: + description: BGPFilterRuleV4 defines a BGP filter rule consisting a single IPv4 CIDR block and a filter action for this CIDR. + properties: + action: + type: string + cidr: + type: string + interface: + type: string + matchOperator: + type: string + prefixLength: + properties: + max: + format: int32 + maximum: 32 + minimum: 0 + type: integer + min: + format: int32 + maximum: 32 + minimum: 0 + type: integer + type: object + source: + type: string + required: + - action + type: object + type: array + exportV6: + description: The ordered set of IPv6 BGPFilter rules acting on exporting routes to a peer. + items: + description: BGPFilterRuleV6 defines a BGP filter rule consisting a single IPv6 CIDR block and a filter action for this CIDR. + properties: + action: + type: string + cidr: + type: string + interface: + type: string + matchOperator: + type: string + prefixLength: + properties: + max: + format: int32 + maximum: 128 + minimum: 0 + type: integer + min: + format: int32 + maximum: 128 + minimum: 0 + type: integer + type: object + source: + type: string + required: + - action + type: object + type: array + importV4: + description: The ordered set of IPv4 BGPFilter rules acting on importing routes from a peer. + items: + description: BGPFilterRuleV4 defines a BGP filter rule consisting a single IPv4 CIDR block and a filter action for this CIDR. + properties: + action: + type: string + cidr: + type: string + interface: + type: string + matchOperator: + type: string + prefixLength: + properties: + max: + format: int32 + maximum: 32 + minimum: 0 + type: integer + min: + format: int32 + maximum: 32 + minimum: 0 + type: integer + type: object + source: + type: string + required: + - action + type: object + type: array + importV6: + description: The ordered set of IPv6 BGPFilter rules acting on importing routes from a peer. + items: + description: BGPFilterRuleV6 defines a BGP filter rule consisting a single IPv6 CIDR block and a filter action for this CIDR. 
+ properties: + action: + type: string + cidr: + type: string + interface: + type: string + matchOperator: + type: string + prefixLength: + properties: + max: + format: int32 + maximum: 128 + minimum: 0 + type: integer + min: + format: int32 + maximum: 128 + minimum: 0 + type: integer + type: object + source: + type: string + required: + - action + type: object + type: array + type: object + type: object + served: true + storage: true +--- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: bgppeers.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -247,6 +462,7 @@ spec: listKind: BGPPeerList plural: bgppeers singular: bgppeer + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -254,14 +470,19 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -272,92 +493,136 @@ spec: description: The AS Number of the peer. format: int32 type: integer + filters: + description: The ordered set of BGPFilters applied on this BGP peer. + items: + type: string + type: array keepOriginalNextHop: - description: Option to keep the original nexthop field when routes - are sent to a BGP Peer. Setting "true" configures the selected BGP - Peers node to use the "next hop keep;" instead of "next hop self;"(default) - in the specific branch of the Node on "bird.cfg". + description: |- + Option to keep the original nexthop field when routes are sent to a BGP Peer. + Setting "true" configures the selected BGP Peers node to use the "next hop keep;" + instead of "next hop self;"(default) in the specific branch of the Node on "bird.cfg". + Note: that this field is deprecated. Users should use the NextHopMode field to control + the next hop attribute for a BGP peer. type: boolean + localWorkloadSelector: + description: |- + Selector for the local workload that the node should peer with. When this is set, the peerSelector and peerIP fields must be empty, + and the ASNumber must not be empty. + type: string maxRestartTime: - description: Time to allow for software restart. When specified, - this is configured as the graceful restart timeout. 
When not specified, - the BIRD default of 120s is used. + description: |- + Time to allow for software restart. When specified, this is configured as the graceful + restart timeout. When not specified, the BIRD default of 120s is used. + type: string + nextHopMode: + allOf: + - enum: + - Auto + - Self + - Keep + - enum: + - Auto + - Self + - Keep + description: |- + NextHopMode defines the method of calculating the next hop attribute for received routes. + This replaces and expands the deprecated KeepOriginalNextHop field. + Users should use this setting to control the next hop attribute for a BGP peer. + When this is set, the value of the KeepOriginalNextHop field is ignored. + if neither keepOriginalNextHop or nextHopMode is specified, BGP's default behaviour is used. + Set it to “Auto” to apply BGP’s default behaviour. + Set it to "Self" to configure "next hop self;" in "bird.cfg". + Set it to "Keep" to configure "next hop keep;" in "bird.cfg". type: string node: - description: The node name identifying the Calico node instance that - is targeted by this peer. If this is not set, and no nodeSelector - is specified, then this BGP peer selects all nodes in the cluster. + description: |- + The node name identifying the Calico node instance that is targeted by this peer. + If this is not set, and no nodeSelector is specified, then this BGP peer selects all + nodes in the cluster. type: string nodeSelector: - description: Selector for the nodes that should have this peering. When - this is set, the Node field must be empty. + description: |- + Selector for the nodes that should have this peering. When this is set, the Node + field must be empty. type: string numAllowedLocalASNumbers: - description: Maximum number of local AS numbers that are allowed in - the AS path for received routes. This removes BGP loop prevention - and should only be used if absolutely necesssary. + description: |- + Maximum number of local AS numbers that are allowed in the AS path for received routes. + This removes BGP loop prevention and should only be used if absolutely necessary. format: int32 type: integer password: - description: Optional BGP password for the peerings generated by this - BGPPeer resource. + description: Optional BGP password for the peerings generated by this BGPPeer resource. properties: secretKeyRef: description: Selects a key of a secret in the node pod's namespace. properties: key: - description: The key of the secret to select from. Must be - a valid secret key. + description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: - description: Specify whether the Secret or its key must be - defined + description: Specify whether the Secret or its key must be defined type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object peerIP: - description: The IP address of the peer followed by an optional port - number to peer with. If port number is given, format should be `[]:port` - or `:` for IPv4. 
If optional port number is not set, - and this peer IP and ASNumber belongs to a calico/node with ListenPort - set in BGPConfiguration, then we use that port to peer. + description: |- + The IP address of the peer followed by an optional port number to peer with. + If port number is given, format should be `[]:port` or `:` for IPv4. + If optional port number is not set, and this peer IP and ASNumber belongs to a calico/node + with ListenPort set in BGPConfiguration, then we use that port to peer. type: string peerSelector: - description: Selector for the remote nodes to peer with. When this - is set, the PeerIP and ASNumber fields must be empty. For each - peering between the local node and selected remote nodes, we configure - an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, - and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The - remote AS number comes from the remote node's NodeBGPSpec.ASNumber, - or the global default if that is not set. + description: |- + Selector for the remote nodes to peer with. When this is set, the PeerIP and + ASNumber fields must be empty. For each peering between the local node and + selected remote nodes, we configure an IPv4 peering if both ends have + NodeBGPSpec.IPv4Address specified, and an IPv6 peering if both ends have + NodeBGPSpec.IPv6Address specified. The remote AS number comes from the remote + node's NodeBGPSpec.ASNumber, or the global default if that is not set. + type: string + reachableBy: + description: |- + Add an exact, i.e. /32, static route toward peer IP in order to prevent route flapping. + ReachableBy contains the address of the gateway which peer can be reached by. type: string sourceAddress: - description: Specifies whether and how to configure a source address - for the peerings generated by this BGPPeer resource. Default value - "UseNodeIP" means to configure the node IP as the source address. "None" - means not to configure a source address. + description: |- + Specifies whether and how to configure a source address for the peerings generated by + this BGPPeer resource. Default value "UseNodeIP" means to configure the node IP as the + source address. "None" means not to configure a source address. type: string + ttlSecurity: + description: |- + TTLSecurity enables the generalized TTL security mechanism (GTSM) which protects against spoofed packets by + ignoring received packets with a smaller than expected TTL value. The provided value is the number of hops + (edges) between the peers. + type: integer type: object type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: blockaffinities.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -366,6 +631,7 @@ spec: listKind: BlockAffinityList plural: blockaffinities singular: blockaffinity + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -373,25 +639,30 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: BlockAffinitySpec contains the specification for a BlockAffinity - resource. + description: BlockAffinitySpec contains the specification for a BlockAffinity resource. properties: cidr: type: string deleted: - description: Deleted indicates that this block affinity is being deleted. + description: |- + Deleted indicates that this block affinity is being deleted. This field is a string for compatibility with older releases that mistakenly treat this field as a string. type: string @@ -399,6 +670,8 @@ spec: type: string state: type: string + type: + type: string required: - cidr - deleted @@ -408,20 +681,13 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: (devel) - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.17.3 name: caliconodestatuses.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -430,6 +696,7 @@ spec: listKind: CalicoNodeStatusList plural: caliconodestatuses singular: caliconodestatus + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -437,41 +704,45 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus - resource. + description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus resource. properties: classes: - description: Classes declares the types of information to monitor - for this calico/node, and allows for selective status reporting - about certain subsets of information. + description: |- + Classes declares the types of information to monitor for this calico/node, + and allows for selective status reporting about certain subsets of information. items: type: string type: array node: - description: The node name identifies the Calico node instance for - node status. + description: The node name identifies the Calico node instance for node status. type: string updatePeriodSeconds: - description: UpdatePeriodSeconds is the period at which CalicoNodeStatus - should be updated. Set to 0 to disable CalicoNodeStatus refresh. - Maximum update period is one day. + description: |- + UpdatePeriodSeconds is the period at which CalicoNodeStatus should be updated. + Set to 0 to disable CalicoNodeStatus refresh. Maximum update period is one day. format: int32 type: integer type: object status: - description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. + description: |- + CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. No validation needed for status since it is updated by Calico. properties: agent: @@ -481,12 +752,10 @@ spec: description: BIRDV4 represents the latest observed status of bird4. properties: lastBootTime: - description: LastBootTime holds the value of lastBootTime - from bird.ctl output. + description: LastBootTime holds the value of lastBootTime from bird.ctl output. type: string lastReconfigurationTime: - description: LastReconfigurationTime holds the value of lastReconfigTime - from bird.ctl output. + description: LastReconfigurationTime holds the value of lastReconfigTime from bird.ctl output. type: string routerID: description: Router ID used by bird. @@ -502,12 +771,10 @@ spec: description: BIRDV6 represents the latest observed status of bird6. properties: lastBootTime: - description: LastBootTime holds the value of lastBootTime - from bird.ctl output. + description: LastBootTime holds the value of lastBootTime from bird.ctl output. type: string lastReconfigurationTime: - description: LastReconfigurationTime holds the value of lastReconfigTime - from bird.ctl output. + description: LastReconfigurationTime holds the value of lastReconfigTime from bird.ctl output. type: string routerID: description: Router ID used by bird. @@ -538,12 +805,10 @@ spec: peersV4: description: PeersV4 represents IPv4 BGP peers status on the node. items: - description: CalicoNodePeer contains the status of BGP peers - on the node. + description: CalicoNodePeer contains the status of BGP peers on the node. properties: peerIP: - description: IP address of the peer whose condition we are - reporting. + description: IP address of the peer whose condition we are reporting. type: string since: description: Since the state or reason last changed. @@ -552,21 +817,19 @@ spec: description: State is the BGP session state. 
type: string type: - description: Type indicates whether this peer is configured - via the node-to-node mesh, or via en explicit global or - per-node BGPPeer object. + description: |- + Type indicates whether this peer is configured via the node-to-node mesh, + or via en explicit global or per-node BGPPeer object. type: string type: object type: array peersV6: description: PeersV6 represents IPv6 BGP peers status on the node. items: - description: CalicoNodePeer contains the status of BGP peers - on the node. + description: CalicoNodePeer contains the status of BGP peers on the node. properties: peerIP: - description: IP address of the peer whose condition we are - reporting. + description: IP address of the peer whose condition we are reporting. type: string since: description: Since the state or reason last changed. @@ -575,9 +838,9 @@ spec: description: State is the BGP session state. type: string type: - description: Type indicates whether this peer is configured - via the node-to-node mesh, or via en explicit global or - per-node BGPPeer object. + description: |- + Type indicates whether this peer is configured via the node-to-node mesh, + or via en explicit global or per-node BGPPeer object. type: string type: object type: array @@ -588,21 +851,19 @@ spec: - numberNotEstablishedV6 type: object lastUpdated: - description: LastUpdated is a timestamp representing the server time - when CalicoNodeStatus object last updated. It is represented in - RFC3339 form and is in UTC. + description: |- + LastUpdated is a timestamp representing the server time when CalicoNodeStatus object + last updated. It is represented in RFC3339 form and is in UTC. format: date-time nullable: true type: string routes: - description: Routes reports routes known to the Calico BGP daemon - on the node. + description: Routes reports routes known to the Calico BGP daemon on the node. properties: routesV4: description: RoutesV4 represents IPv4 routes on the node. items: - description: CalicoNodeRoute contains the status of BGP routes - on the node. + description: CalicoNodeRoute contains the status of BGP routes on the node. properties: destination: description: Destination of the route. @@ -614,29 +875,24 @@ spec: description: Interface for the destination type: string learnedFrom: - description: LearnedFrom contains information regarding - where this route originated. + description: LearnedFrom contains information regarding where this route originated. properties: peerIP: - description: If sourceType is NodeMesh or BGPPeer, IP - address of the router that sent us this route. + description: If sourceType is NodeMesh or BGPPeer, IP address of the router that sent us this route. type: string sourceType: - description: Type of the source where a route is learned - from. + description: Type of the source where a route is learned from. type: string type: object type: - description: Type indicates if the route is being used for - forwarding or not. + description: Type indicates if the route is being used for forwarding or not. type: string type: object type: array routesV6: description: RoutesV6 represents IPv6 routes on the node. items: - description: CalicoNodeRoute contains the status of BGP routes - on the node. + description: CalicoNodeRoute contains the status of BGP routes on the node. properties: destination: description: Destination of the route. 
@@ -648,21 +904,17 @@ spec: description: Interface for the destination type: string learnedFrom: - description: LearnedFrom contains information regarding - where this route originated. + description: LearnedFrom contains information regarding where this route originated. properties: peerIP: - description: If sourceType is NodeMesh or BGPPeer, IP - address of the router that sent us this route. + description: If sourceType is NodeMesh or BGPPeer, IP address of the router that sent us this route. type: string sourceType: - description: Type of the source where a route is learned - from. + description: Type of the source where a route is learned from. type: string type: object type: - description: Type indicates if the route is being used for - forwarding or not. + description: Type indicates if the route is being used for forwarding or not. type: string type: object type: array @@ -671,17 +923,13 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: clusterinformations.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -690,6 +938,7 @@ spec: listKind: ClusterInformationList plural: clusterinformations singular: clusterinformation + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -698,24 +947,27 @@ spec: description: ClusterInformation contains the cluster specific information. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: ClusterInformationSpec contains the values of describing - the cluster. + description: ClusterInformationSpec contains the values of describing the cluster. 
properties: calicoVersion: - description: CalicoVersion is the version of Calico that the cluster - is running + description: CalicoVersion is the version of Calico that the cluster is running type: string clusterGUID: description: ClusterGUID is the GUID of the cluster @@ -724,9 +976,9 @@ spec: description: ClusterType describes the type of the cluster type: string datastoreReady: - description: DatastoreReady is used during significant datastore migrations - to signal to components such as Felix that it should wait before - accessing the datastore. + description: |- + DatastoreReady is used during significant datastore migrations to signal to components + such as Felix that it should wait before accessing the datastore. type: boolean variant: description: Variant declares which variant of Calico should be active. @@ -735,17 +987,13 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: felixconfigurations.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -754,6 +1002,7 @@ spec: listKind: FelixConfigurationList plural: felixconfigurations singular: felixconfiguration + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -762,14 +1011,19 @@ spec: description: Felix Configuration contains the configuration for Felix. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -777,222 +1031,501 @@ spec: description: FelixConfigurationSpec contains the values of the Felix configuration. properties: allowIPIPPacketsFromWorkloads: - description: 'AllowIPIPPacketsFromWorkloads controls whether Felix - will add a rule to drop IPIP encapsulated traffic from workloads - [Default: false]' + description: |- + AllowIPIPPacketsFromWorkloads controls whether Felix will add a rule to drop IPIP encapsulated traffic + from workloads. 
[Default: false] type: boolean allowVXLANPacketsFromWorkloads: - description: 'AllowVXLANPacketsFromWorkloads controls whether Felix - will add a rule to drop VXLAN encapsulated traffic from workloads - [Default: false]' + description: |- + AllowVXLANPacketsFromWorkloads controls whether Felix will add a rule to drop VXLAN encapsulated traffic + from workloads. [Default: false] type: boolean awsSrcDstCheck: - description: 'Set source-destination-check on AWS EC2 instances. Accepted - value must be one of "DoNothing", "Enable" or "Disable". [Default: - DoNothing]' + description: |- + AWSSrcDstCheck controls whether Felix will try to change the "source/dest check" setting on the EC2 instance + on which it is running. A value of "Disable" will try to disable the source/dest check. Disabling the check + allows for sending workload traffic without encapsulation within the same AWS subnet. + [Default: DoNothing] enum: - DoNothing - Enable - Disable type: string + bpfCTLBLogFilter: + description: |- + BPFCTLBLogFilter specifies what is logged by connect time load balancer when BPFLogLevel is + debug. Currently has to be specified as 'all' when BPFLogFilters is set + to see CTLB logs. + [Default: unset - means logs are emitted when BPFLogLevel is debug and BPFLogFilters not set.] + type: string + bpfConnectTimeLoadBalancing: + description: |- + BPFConnectTimeLoadBalancing when in BPF mode, controls whether Felix installs the connect-time load + balancer. The connect-time load balancer is required for the host to be able to reach Kubernetes services + and it improves the performance of pod-to-service connections. When set to TCP, connect time load balancing + is available only for services with TCP ports. [Default: TCP] + enum: + - TCP + - Enabled + - Disabled + type: string bpfConnectTimeLoadBalancingEnabled: - description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, - controls whether Felix installs the connection-time load balancer. The - connect-time load balancer is required for the host to be able to - reach Kubernetes services and it improves the performance of pod-to-service - connections. The only reason to disable it is for debugging purposes. [Default: - true]' + description: |- + BPFConnectTimeLoadBalancingEnabled when in BPF mode, controls whether Felix installs the connection-time load + balancer. The connect-time load balancer is required for the host to be able to reach Kubernetes services + and it improves the performance of pod-to-service connections. The only reason to disable it is for debugging + purposes. + + Deprecated: Use BPFConnectTimeLoadBalancing [Default: true] type: boolean + bpfConntrackLogLevel: + description: |- + BPFConntrackLogLevel controls the log level of the BPF conntrack cleanup program, which runs periodically + to clean up expired BPF conntrack entries. + [Default: Off]. + enum: + - "Off" + - Debug + type: string + bpfConntrackMode: + description: |- + BPFConntrackCleanupMode controls how BPF conntrack entries are cleaned up. `Auto` will use a BPF program if supported, + falling back to userspace if not. `Userspace` will always use the userspace cleanup code. `BPFProgram` will + always use the BPF program (failing if not supported). + [Default: Auto] + enum: + - Auto + - Userspace + - BPFProgram + type: string + bpfConntrackTimeouts: + description: |- + BPFConntrackTimers overrides the default values for the specified conntrack timer if + set. Each value can be either a duration or `Auto` to pick the value from + a Linux conntrack timeout.
+ + Configurable timers are: CreationGracePeriod, TCPSynSent, + TCPEstablished, TCPFinsSeen, TCPResetSeen, UDPTimeout, GenericTimeout, + ICMPTimeout. + + Unset values are replaced by the default values with a warning log for + incorrect values. + properties: + creationGracePeriod: + description: |2- + CreationGracePeriod gives a generic grace period to new connections + before they are considered for cleanup [Default: 10s]. + pattern: ^(([0-9]*(\.[0-9]*)?(ms|s|h|m|us)+)+|Auto)$ + type: string + genericTimeout: + description: |- + GenericTimeout controls how long it takes before considering this + entry for cleanup after the connection became idle. If set to 'Auto', the + value from nf_conntrack_generic_timeout is used. If nil, Calico uses its + own default value. [Default: 10m]. + pattern: ^(([0-9]*(\.[0-9]*)?(ms|s|h|m|us)+)+|Auto)$ + type: string + icmpTimeout: + description: |- + ICMPTimeout controls how long it takes before considering this + entry for cleanup after the connection became idle. If set to 'Auto', the + value from nf_conntrack_icmp_timeout is used. If nil, Calico uses its + own default value. [Default: 5s]. + pattern: ^(([0-9]*(\.[0-9]*)?(ms|s|h|m|us)+)+|Auto)$ + type: string + tcpEstablished: + description: |- + TCPEstablished controls how long it takes before considering this entry for + cleanup after the connection became idle. If set to 'Auto', the + value from nf_conntrack_tcp_timeout_established is used. If nil, Calico uses + its own default value. [Default: 1h]. + pattern: ^(([0-9]*(\.[0-9]*)?(ms|s|h|m|us)+)+|Auto)$ + type: string + tcpFinsSeen: + description: |- + TCPFinsSeen controls how long it takes before considering this entry for + cleanup after the connection was closed gracefully. If set to 'Auto', the + value from nf_conntrack_tcp_timeout_time_wait is used. If nil, Calico uses + its own default value. [Default: Auto]. + pattern: ^(([0-9]*(\.[0-9]*)?(ms|s|h|m|us)+)+|Auto)$ + type: string + tcpResetSeen: + description: |- + TCPResetSeen controls how long it takes before considering this entry for + cleanup after the connection was aborted. If nil, Calico uses its own + default value. [Default: 40s]. + pattern: ^(([0-9]*(\.[0-9]*)?(ms|s|h|m|us)+)+|Auto)$ + type: string + tcpSynSent: + description: |- + TCPSynSent controls how long it takes before considering this entry for + cleanup after the last SYN without a response. If set to 'Auto', the + value from nf_conntrack_tcp_timeout_syn_sent is used. If nil, Calico uses + its own default value. [Default: 20s]. + pattern: ^(([0-9]*(\.[0-9]*)?(ms|s|h|m|us)+)+|Auto)$ + type: string + udpTimeout: + description: |- + UDPTimeout controls how long it takes before considering this entry for + cleanup after the connection became idle. If nil, Calico uses its own + default value. [Default: 60s]. + pattern: ^(([0-9]*(\.[0-9]*)?(ms|s|h|m|us)+)+|Auto)$ + type: string + type: object + bpfDSROptoutCIDRs: + description: |- + BPFDSROptoutCIDRs is a list of CIDRs which are excluded from DSR. That is, clients + in those CIDRs will access service node ports as if BPFExternalServiceMode was set to + Tunnel. + items: + type: string + type: array bpfDataIfacePattern: - description: BPFDataIfacePattern is a regular expression that controls - which interfaces Felix should attach BPF programs to in order to - catch traffic to/from the network. This needs to match the interfaces - that Calico workload traffic flows over as well as any interfaces - that handle incoming traffic to nodeports and services from outside - the cluster.
It should not match the workload interfaces (usually - named cali...). + description: |- + BPFDataIfacePattern is a regular expression that controls which interfaces Felix should attach BPF programs to + in order to catch traffic to/from the network. This needs to match the interfaces that Calico workload traffic + flows over as well as any interfaces that handle incoming traffic to nodeports and services from outside the + cluster. It should not match the workload interfaces (usually named cali...) or any other special device managed + by Calico itself (e.g., tunnels). + type: string + bpfDisableGROForIfaces: + description: |- + BPFDisableGROForIfaces is a regular expression that controls which interfaces Felix should disable the + Generic Receive Offload [GRO] option. It should not match the workload interfaces (usually named cali...). type: string bpfDisableUnprivileged: - description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled - sysctl to disable unprivileged use of BPF. This ensures that unprivileged - users cannot access Calico''s BPF maps and cannot insert their own - BPF programs to interfere with Calico''s. [Default: true]' + description: |- + BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled sysctl to disable + unprivileged use of BPF. This ensures that unprivileged users cannot access Calico's BPF maps and + cannot insert their own BPF programs to interfere with Calico's. [Default: true] type: boolean bpfEnabled: - description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. - [Default: false]' + description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. [Default: false]' type: boolean bpfEnforceRPF: - description: 'BPFEnforceRPF enforce strict RPF on all interfaces with - BPF programs regardless of what is the per-interfaces or global - setting. Possible values are Disabled or Strict. [Default: Strict]' + description: |- + BPFEnforceRPF enforces strict RPF on all host interfaces with BPF programs regardless of + what is the per-interfaces or global setting. Possible values are Disabled, Strict + or Loose. [Default: Loose] + pattern: ^(?i)(Disabled|Strict|Loose)?$ type: string + bpfExcludeCIDRsFromNAT: + description: |- + BPFExcludeCIDRsFromNAT is a list of CIDRs that are to be excluded from NAT + resolution so that host can handle them. A typical use case is node local + DNS cache. + items: + type: string + type: array + bpfExportBufferSizeMB: + description: |- + BPFExportBufferSizeMB in BPF mode, controls the buffer size used for sending BPF events to felix. + [Default: 1] + type: integer bpfExtToServiceConnmark: - description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit - mark that is set on connections from an external client to a local - service. This mark allows us to control how packets of that connection - are routed within the host and how is routing intepreted by RPF - check. [Default: 0]' + description: |- + BPFExtToServiceConnmark in BPF mode, controls a 32bit mark that is set on connections from an + external client to a local service. This mark allows us to control how packets of that + connection are routed within the host and how routing is interpreted by RPF check. [Default: 0] type: integer bpfExternalServiceMode: - description: 'BPFExternalServiceMode in BPF mode, controls how connections - from outside the cluster to services (node ports and cluster IPs) - are forwarded to remote workloads.
If set to "Tunnel" then both - request and response traffic is tunneled to the remote node. If - set to "DSR", the request traffic is tunneled but the response traffic - is sent directly from the remote node. In "DSR" mode, the remote - node appears to use the IP of the ingress node; this requires a - permissive L2 network. [Default: Tunnel]' + description: |- + BPFExternalServiceMode in BPF mode, controls how connections from outside the cluster to services (node ports + and cluster IPs) are forwarded to remote workloads. If set to "Tunnel" then both request and response traffic + is tunneled to the remote node. If set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote node appears to use the IP of the ingress + node; this requires a permissive L2 network. [Default: Tunnel] + pattern: ^(?i)(Tunnel|DSR)?$ + type: string + bpfForceTrackPacketsFromIfaces: + description: |- + BPFForceTrackPacketsFromIfaces in BPF mode, forces traffic from these interfaces + to skip Calico's iptables NOTRACK rule, allowing traffic from those interfaces to be + tracked by Linux conntrack. Should only be used for interfaces that are not used for + the Calico fabric. For example, a docker bridge device for non-Calico-networked + containers. [Default: docker+] + items: + type: string + type: array + bpfHostConntrackBypass: + description: |- + BPFHostConntrackBypass Controls whether to bypass Linux conntrack in BPF mode for + workloads and services. [Default: true - bypass Linux conntrack] + type: boolean + bpfHostNetworkedNATWithoutCTLB: + description: |- + BPFHostNetworkedNATWithoutCTLB when in BPF mode, controls whether Felix does a NAT without CTLB. This along with BPFConnectTimeLoadBalancing + determines the CTLB behavior. [Default: Enabled] + enum: + - Enabled + - Disabled type: string bpfKubeProxyEndpointSlicesEnabled: - description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls - whether Felix's embedded kube-proxy accepts EndpointSlices or not. + description: |- + BPFKubeProxyEndpointSlicesEnabled is deprecated and has no effect. BPF + kube-proxy always accepts endpoint slices. This option will be removed in + the next release. type: boolean bpfKubeProxyIptablesCleanupEnabled: - description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF - mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s - iptables chains. Should only be enabled if kube-proxy is not running. [Default: - true]' + description: |- + BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF mode, Felix will proactively clean up the upstream + Kubernetes kube-proxy's iptables chains. Should only be enabled if kube-proxy is not running. [Default: true] type: boolean bpfKubeProxyMinSyncPeriod: - description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the - minimum time between updates to the dataplane for Felix''s embedded - kube-proxy. Lower values give reduced set-up latency. Higher values - reduce Felix CPU usage by batching up more work. [Default: 1s]' + description: |- + BPFKubeProxyMinSyncPeriod, in BPF mode, controls the minimum time between updates to the dataplane for Felix's + embedded kube-proxy. Lower values give reduced set-up latency. Higher values reduce Felix CPU usage by + batching up more work. 
[Default: 1s] + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string + bpfL3IfacePattern: + description: |- + BPFL3IfacePattern is a regular expression that allows listing tunnel devices like wireguard or vxlan (i.e., L3 devices) + in addition to BPFDataIfacePattern. That is, tunnel interfaces not created by Calico, that Calico workload traffic flows + over as well as any interfaces that handle incoming traffic to nodeports and services from outside the cluster. + type: string + bpfLogFilters: + additionalProperties: + type: string + description: |- + BPFLogFilters is a map of key=values where the value is + a pcap filter expression and the key is an interface name with 'all' + denoting all interfaces, 'weps' all workload endpoints and 'heps' all host + endpoints. + + When specified as an env var, it accepts a comma-separated list of + key=values. + [Default: unset - means all debug logs are emitted] + type: object bpfLogLevel: - description: 'BPFLogLevel controls the log level of the BPF programs - when in BPF dataplane mode. One of "Off", "Info", or "Debug". The - logs are emitted to the BPF trace pipe, accessible with the command - `tc exec bpf debug`. [Default: Off].' + description: |- + BPFLogLevel controls the log level of the BPF programs when in BPF dataplane mode. One of "Off", "Info", or + "Debug". The logs are emitted to the BPF trace pipe, accessible with the command `tc exec bpf debug`. + [Default: Off]. + pattern: ^(?i)(Off|Info|Debug)?$ type: string bpfMapSizeConntrack: - description: 'BPFMapSizeConntrack sets the size for the conntrack - map. This map must be large enough to hold an entry for each active - connection. Warning: changing the size of the conntrack map can - cause disruption.' + description: |- + BPFMapSizeConntrack sets the size for the conntrack map. This map must be large enough to hold + an entry for each active connection. Warning: changing the size of the conntrack map can cause disruption. + type: integer + bpfMapSizeConntrackCleanupQueue: + description: |- + BPFMapSizeConntrackCleanupQueue sets the size for the map used to hold NAT conntrack entries that are queued + for cleanup. This should be big enough to hold all the NAT entries that expire within one cleanup interval. + minimum: 1 type: integer + bpfMapSizeConntrackScaling: + description: |- + BPFMapSizeConntrackScaling controls whether and how we scale the conntrack map size depending + on its usage. 'Disabled' makes the size stay at the default or whatever is set by + BPFMapSizeConntrack*. 'DoubleIfFull' doubles the size when the map is pretty much full even + after cleanups. [Default: DoubleIfFull] + pattern: ^(?i)(Disabled|DoubleIfFull)?$ + type: string bpfMapSizeIPSets: - description: BPFMapSizeIPSets sets the size for ipsets map. The IP - sets map must be large enough to hold an entry for each endpoint - matched by every selector in the source/destination matches in network - policy. Selectors such as "all()" can result in large numbers of - entries (one entry per endpoint in that case). + description: |- + BPFMapSizeIPSets sets the size for ipsets map. The IP sets map must be large enough to hold an entry + for each endpoint matched by every selector in the source/destination matches in network policy. Selectors + such as "all()" can result in large numbers of entries (one entry per endpoint in that case). + type: integer + bpfMapSizeIfState: + description: |- + BPFMapSizeIfState sets the size for ifstate map.
The ifstate map must be large enough to hold an entry + for each device (host + workloads) on a host. type: integer bpfMapSizeNATAffinity: + description: |- + BPFMapSizeNATAffinity sets the size of the BPF map that stores the affinity of a connection (for services that + enable that feature). type: integer bpfMapSizeNATBackend: - description: BPFMapSizeNATBackend sets the size for nat back end map. - This is the total number of endpoints. This is mostly more than - the size of the number of services. + description: |- + BPFMapSizeNATBackend sets the size for NAT back end map. + This is the total number of endpoints. This is mostly + more than the size of the number of services. type: integer bpfMapSizeNATFrontend: - description: BPFMapSizeNATFrontend sets the size for nat front end - map. FrontendMap should be large enough to hold an entry for each - nodeport, external IP and each port in each service. + description: |- + BPFMapSizeNATFrontend sets the size for NAT front end map. + FrontendMap should be large enough to hold an entry for each nodeport, + external IP and each port in each service. + type: integer + bpfMapSizePerCpuConntrack: + description: |- + BPFMapSizePerCPUConntrack determines the size of conntrack map based on the number of CPUs. If set to a + non-zero value, overrides BPFMapSizeConntrack with `BPFMapSizePerCPUConntrack * (Number of CPUs)`. + This map must be large enough to hold an entry for each active connection. Warning: changing the size of the + conntrack map can cause disruption. type: integer bpfMapSizeRoute: - description: BPFMapSizeRoute sets the size for the routes map. The - routes map should be large enough to hold one entry per workload - and a handful of entries per host (enough to cover its own IPs and + description: |- + BPFMapSizeRoute sets the size for the routes map. The routes map should be large enough + to hold one entry per workload and a handful of entries per host (enough to cover its own IPs and tunnel IPs). type: integer bpfPSNATPorts: anyOf: - type: integer - type: string - description: 'BPFPSNATPorts sets the range from which we randomly - pick a port if there is a source port collision. This should be - within the ephemeral range as defined by RFC 6056 (1024–65535) and - preferably outside the ephemeral ranges used by common operating - systems. Linux uses 32768–60999, while others mostly use the IANA - defined range 49152–65535. It is not necessarily a problem if this - range overlaps with the operating systems. Both ends of the range - are inclusive. [Default: 20000:29999]' + description: |- + BPFPSNATPorts sets the range from which we randomly pick a port if there is a source port + collision. This should be within the ephemeral range as defined by RFC 6056 (1024–65535) and + preferably outside the ephemeral ranges used by common operating systems. Linux uses + 32768–60999, while others mostly use the IANA defined range 49152–65535. It is not necessarily + a problem if this range overlaps with the operating systems. Both ends of the range are + inclusive. [Default: 20000:29999] pattern: ^.* x-kubernetes-int-or-string: true + bpfPolicyDebugEnabled: + description: |- + BPFPolicyDebugEnabled when true, Felix records detailed information + about the BPF policy programs, which can be examined with the calico-bpf command-line tool. + type: boolean + bpfProfiling: + description: |- + BPFProfiling controls profiling of BPF programs. At the moment, it can be + Disabled or Enabled.
[Default: Disabled] + enum: + - Enabled + - Disabled + type: string + bpfRedirectToPeer: + description: |- + BPFRedirectToPeer controls whether it is allowed to forward straight to the + peer side of the workload devices. It is allowed for any host L2 devices by default + (L2Only), but it breaks TCP dump on the host side of workload device as it bypasses + it on ingress. Value of Enabled also allows redirection from L3 host devices like + IPIP tunnel or Wireguard directly to the peer side of the workload's device. This + makes redirection faster, however, it breaks tools like tcpdump on the peer side. + Use Enabled with caution. [Default: L2Only] + enum: + - Enabled + - Disabled + - L2Only + type: string chainInsertMode: - description: 'ChainInsertMode controls whether Felix hooks the kernel''s - top-level iptables chains by inserting a rule at the top of the - chain or by appending a rule at the bottom. insert is the safe default - since it prevents Calico''s rules from being bypassed. If you switch - to append mode, be sure that the other rules in the chains signal - acceptance by falling through to the Calico rules, otherwise the - Calico policy will be bypassed. [Default: insert]' + description: |- + ChainInsertMode controls whether Felix hooks the kernel's top-level iptables chains by inserting a rule + at the top of the chain or by appending a rule at the bottom. insert is the safe default since it prevents + Calico's rules from being bypassed. If you switch to append mode, be sure that the other rules in the chains + signal acceptance by falling through to the Calico rules, otherwise the Calico policy will be bypassed. + [Default: insert] + pattern: ^(?i)(Insert|Append)?$ type: string dataplaneDriver: - description: DataplaneDriver filename of the external dataplane driver - to use. Only used if UseInternalDataplaneDriver is set to false. + description: |- + DataplaneDriver filename of the external dataplane driver to use. Only used if UseInternalDataplaneDriver + is set to false. type: string dataplaneWatchdogTimeout: - description: 'DataplaneWatchdogTimeout is the readiness/liveness timeout - used for Felix''s (internal) dataplane driver. Increase this value - if you experience spurious non-ready or non-live events when Felix - is under heavy load. Decrease the value to get felix to report non-live - or non-ready more quickly. [Default: 90s]' + description: |- + DataplaneWatchdogTimeout is the readiness/liveness timeout used for Felix's (internal) dataplane driver. + Deprecated: replaced by the generic HealthTimeoutOverrides. type: string debugDisableLogDropping: + description: |- + DebugDisableLogDropping disables the dropping of log messages when the log buffer is full. This can + significantly impact performance if log write-out is a bottleneck. [Default: false] type: boolean + debugHost: + description: |- + DebugHost is the host IP or hostname to bind the debug port to. Only used + if DebugPort is set. [Default: localhost] + type: string debugMemoryProfilePath: + description: DebugMemoryProfilePath is the path to write the memory profile to when triggered by signal. type: string + debugPort: + description: |- + DebugPort if set, enables Felix's debug HTTP port, which allows memory and CPU profiles + to be retrieved. The debug port is not secure, it should not be exposed to the internet. + type: integer debugSimulateCalcGraphHangAfter: + description: |- + DebugSimulateCalcGraphHangAfter is used to simulate a hang in the calculation graph after the specified duration.
+ This is useful in tests of the watchdog system only! + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + debugSimulateDataplaneApplyDelay: + description: |- + DebugSimulateDataplaneApplyDelay adds an artificial delay to every dataplane operation. This is useful for + simulating a heavily loaded system for test purposes only. + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string debugSimulateDataplaneHangAfter: + description: |- + DebugSimulateDataplaneHangAfter is used to simulate a hang in the dataplane after the specified duration. + This is useful in tests of the watchdog system only! + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string defaultEndpointToHostAction: - description: 'DefaultEndpointToHostAction controls what happens to - traffic that goes from a workload endpoint to the host itself (after - the traffic hits the endpoint egress policy). By default Calico - blocks traffic from workload endpoints to the host itself with an - iptables "DROP" action. If you want to allow some or all traffic - from endpoint to host, set this parameter to RETURN or ACCEPT. Use - RETURN if you have your own rules in the iptables "INPUT" chain; - Calico will insert its rules at the top of that chain, then "RETURN" - packets to the "INPUT" chain once it has completed processing workload - endpoint egress policy. Use ACCEPT to unconditionally accept packets - from workloads after processing workload endpoint egress policy. - [Default: Drop]' + description: |- + DefaultEndpointToHostAction controls what happens to traffic that goes from a workload endpoint to the host + itself (after the endpoint's egress policy is applied). By default, Calico blocks traffic from workload + endpoints to the host itself with an iptables "DROP" action. If you want to allow some or all traffic from + endpoint to host, set this parameter to RETURN or ACCEPT. Use RETURN if you have your own rules in the iptables + "INPUT" chain; Calico will insert its rules at the top of that chain, then "RETURN" packets to the "INPUT" chain + once it has completed processing workload endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. [Default: Drop] + pattern: ^(?i)(Drop|Accept|Return)?$ type: string deviceRouteProtocol: - description: This defines the route protocol added to programmed device - routes, by default this will be RTPROT_BOOT when left blank. + description: |- + DeviceRouteProtocol controls the protocol to set on routes programmed by Felix. The protocol is an 8-bit label + used to identify the owner of the route. type: integer deviceRouteSourceAddress: - description: This is the IPv4 source address to use on programmed - device routes. By default the source address is left blank, leaving - the kernel to choose the source address used. + description: |- + DeviceRouteSourceAddress IPv4 address to set as the source hint for routes programmed by Felix. When not set + the source address for local traffic from host to workload will be determined by the kernel. type: string deviceRouteSourceAddressIPv6: - description: This is the IPv6 source address to use on programmed - device routes. By default the source address is left blank, leaving - the kernel to choose the source address used. + description: |- + DeviceRouteSourceAddressIPv6 IPv6 address to set as the source hint for routes programmed by Felix. When not set + the source address for local traffic from host to workload will be determined by the kernel. 
type: string disableConntrackInvalidCheck: + description: |- + DisableConntrackInvalidCheck disables the check for invalid connections in conntrack. While the conntrack + invalid check helps to detect malicious traffic, it can also cause issues with certain multi-NIC scenarios. type: boolean endpointReportingDelay: + description: |- + EndpointReportingDelay is the delay before Felix reports endpoint status to the datastore. This is only used + by the OpenStack integration. [Default: 1s] + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string endpointReportingEnabled: + description: |- + EndpointReportingEnabled controls whether Felix reports endpoint status to the datastore. This is only used + by the OpenStack integration. [Default: false] type: boolean + endpointStatusPathPrefix: + description: |- + EndpointStatusPathPrefix is the path to the directory where endpoint status will be written. Endpoint status + file reporting is disabled if field is left empty. + + Chosen directory should match the directory used by the CNI plugin for PodStartupDelay. + [Default: /var/run/calico] + type: string externalNodesList: - description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes - which may source tunnel traffic and have the tunneled traffic be - accepted at calico nodes. + description: |- + ExternalNodesCIDRList is a list of CIDRs of external, non-Calico nodes from which VXLAN/IPIP overlay traffic + will be allowed. By default, external tunneled traffic is blocked to reduce attack surface. items: type: string type: array failsafeInboundHostPorts: - description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports - and CIDRs that Felix will allow incoming traffic to host endpoints - on irrespective of the security policy. This is useful to avoid - accidentally cutting off a host with incorrect configuration. For - back-compatibility, if the protocol is not specified, it defaults - to "tcp". If a CIDR is not specified, it will allow traffic from - all addresses. To disable all inbound host ports, use the value - none. The default value allows ssh access and DHCP. [Default: tcp:22, - udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + description: |- + FailsafeInboundHostPorts is a list of ProtoPort struct objects including UDP/TCP/SCTP ports and CIDRs that Felix will + allow incoming traffic to host endpoints on irrespective of the security policy. This is useful to avoid accidentally + cutting off a host with incorrect configuration. For backwards compatibility, if the protocol is not specified, + it defaults to "tcp". If a CIDR is not specified, it will allow traffic from all addresses. To disable all inbound host ports, + use the value "[]". The default value allows ssh access, DHCP, BGP, etcd and the Kubernetes API. + [Default: tcp:22, udp:68, tcp:179, tcp:2379, tcp:2380, tcp:5473, tcp:6443, tcp:6666, tcp:6667 ] items: - description: ProtoPort is combination of protocol, port, and CIDR. - Protocol and port must be specified. + description: ProtoPort is a combination of protocol, port, and CIDR. Protocol and port must be specified. properties: net: type: string @@ -1002,24 +1535,19 @@ spec: type: string required: - port - - protocol type: object type: array failsafeOutboundHostPorts: - description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports - and CIDRs that Felix will allow outgoing traffic from host endpoints - to irrespective of the security policy. This is useful to avoid - accidentally cutting off a host with incorrect configuration.
For - back-compatibility, if the protocol is not specified, it defaults - to "tcp". If a CIDR is not specified, it will allow traffic from - all addresses. To disable all outbound host ports, use the value - none. The default value opens etcd''s standard ports to ensure that - Felix does not get cut off from etcd as well as allowing DHCP and - DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, - tcp:6667, udp:53, udp:67]' + description: |- + FailsafeOutboundHostPorts is a list of ProtoPort struct objects including UDP/TCP/SCTP ports and CIDRs that Felix + will allow outgoing traffic from host endpoints to irrespective of the security policy. This is useful to avoid accidentally + cutting off a host with incorrect configuration. For backwards compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from all addresses. To disable all outbound host ports, + use the value "[]". The default value opens etcd's standard ports to ensure that Felix does not get cut off from etcd + as well as allowing DHCP, DNS, BGP and the Kubernetes API. + [Default: udp:53, udp:67, tcp:179, tcp:2379, tcp:2380, tcp:5473, tcp:6443, tcp:6666, tcp:6667 ] items: - description: ProtoPort is combination of protocol, port, and CIDR. - Protocol and port must be specified. + description: ProtoPort is a combination of protocol, port, and CIDR. Protocol and port must be specified. properties: net: type: string @@ -1029,138 +1557,266 @@ spec: type: string required: - port - - protocol type: object type: array featureDetectOverride: - description: FeatureDetectOverride is used to override the feature - detection. Values are specified in a comma separated list with no - spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". - "true" or "false" will force the feature, empty or omitted values - are auto-detected. + description: |- + FeatureDetectOverride is used to override feature detection based on auto-detected platform + capabilities. Values are specified in a comma separated list with no spaces, example; + "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". A value of "true" or "false" will + force enable/disable feature, empty or omitted values fall back to auto-detection. + pattern: ^([a-zA-Z0-9-_]+=(true|false|),)*([a-zA-Z0-9-_]+=(true|false|))?$ + type: string + featureGates: + description: |- + FeatureGates is used to enable or disable tech-preview Calico features. + Values are specified in a comma separated list with no spaces, example; + "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false". This is + used to enable features that are not fully production ready. + pattern: ^([a-zA-Z0-9-_]+=([^=]+),)*([a-zA-Z0-9-_]+=([^=]+))?$ type: string floatingIPs: - default: Disabled - description: FloatingIPs configures whether or not Felix will program - floating IP addresses. + description: |- + FloatingIPs configures whether or not Felix will program non-OpenStack floating IP addresses. (OpenStack-derived + floating IPs are always programmed, regardless of this setting.) enum: - Enabled - Disabled type: string - genericXDPEnabled: - description: 'GenericXDPEnabled enables Generic XDP so network cards - that don''t support XDP offload or driver modes can use XDP. This - is not recommended since it doesn''t provide better performance - than iptables.
[Default: false]' - type: boolean - healthEnabled: + flowLogsCollectorDebugTrace: + description: |- + When FlowLogsCollectorDebugTrace is set to true, enables the logs in the collector to be + printed in their entirety. + type: boolean + flowLogsFlushInterval: + description: FlowLogsFlushInterval configures the interval at which Felix exports flow logs. + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + flowLogsGoldmaneServer: + description: FlowLogsGoldmaneServer is the flow server endpoint to which flow data should be published. + type: string + flowLogsLocalReporter: + description: 'FlowLogsLocalReporter configures local unix socket for reporting flow data from each node. [Default: Disabled]' + enum: + - Disabled + - Enabled + type: string + flowLogsPolicyEvaluationMode: + description: |- + Continuous - Felix evaluates active flows on a regular basis to determine the rule + traces in the flow logs. Any policy updates that impact a flow will be reflected in the + pending_policies field, offering a near-real-time view of policy changes across flows. + None - Felix stops evaluating pending traces. + [Default: Continuous] + enum: + - None + - Continuous + type: string + genericXDPEnabled: + description: |- + GenericXDPEnabled enables Generic XDP so network cards that don't support XDP offload or driver + modes can use XDP. This is not recommended since it doesn't provide better performance than + iptables. [Default: false] + type: boolean + goGCThreshold: + description: |- + GoGCThreshold sets the Go runtime's garbage collection threshold. I.e. the percentage that the heap is + allowed to grow before garbage collection is triggered. In general, doubling the value halves the CPU time + spent doing GC, but it also doubles peak GC memory overhead. A special value of -1 can be used + to disable GC entirely; this should only be used in conjunction with the GoMemoryLimitMB setting. + + This setting is overridden by the GOGC environment variable. + + [Default: 40] + type: integer + goMaxProcs: + description: |- + GoMaxProcs sets the maximum number of CPUs that the Go runtime will use concurrently. A value of -1 means + "use the system default"; typically the number of real CPUs on the system. + + This setting is overridden by the GOMAXPROCS environment variable. + + [Default: -1] + type: integer + goMemoryLimitMB: + description: |- + GoMemoryLimitMB sets a (soft) memory limit for the Go runtime in MB. The Go runtime will try to keep its memory + usage under the limit by triggering GC as needed. To avoid thrashing, it will exceed the limit if GC starts to + take more than 50% of the process's CPU time. A value of -1 disables the memory limit. + + Note that the memory limit, if used, must be considerably less than any hard resource limit set at the container + or pod level. This is because felix is not the only process that must run in the container or pod. + + This setting is overridden by the GOMEMLIMIT environment variable. + + [Default: -1] + type: integer + healthEnabled: + description: |- + HealthEnabled if set to true, enables Felix's health port, which provides readiness and liveness endpoints. + [Default: false] type: boolean healthHost: + description: 'HealthHost is the host that the health server should bind to. [Default: localhost]' type: string healthPort: + description: 'HealthPort is the TCP port that the health server should bind to.
[Default: 9099]' type: integer + healthTimeoutOverrides: + description: |- + HealthTimeoutOverrides allows the internal watchdog timeouts of individual subcomponents to be + overridden. This is useful for working around "false positive" liveness timeouts that can occur + in particularly stressful workloads or if CPU is constrained. For a list of active + subcomponents, see Felix's logs. + items: + properties: + name: + type: string + timeout: + type: string + required: + - name + - timeout + type: object + type: array interfaceExclude: - description: 'InterfaceExclude is a comma-separated list of interfaces - that Felix should exclude when monitoring for host endpoints. The - default value ensures that Felix ignores Kubernetes'' IPVS dummy - interface, which is used internally by kube-proxy. If you want to - exclude multiple interface names using a single value, the list - supports regular expressions. For regular expressions you must wrap - the value with ''/''. For example having values ''/^kube/,veth1'' - will exclude all interfaces that begin with ''kube'' and also the - interface ''veth1''. [Default: kube-ipvs0]' + description: |- + InterfaceExclude A comma-separated list of interface names that should be excluded when Felix is resolving + host endpoints. The default value ensures that Felix ignores Kubernetes' internal `kube-ipvs0` device. If you + want to exclude multiple interface names using a single value, the list supports regular expressions. For + regular expressions you must wrap the value with `/`. For example having values `/^kube/,veth1` will exclude + all interfaces that begin with `kube` and also the interface `veth1`. [Default: kube-ipvs0] type: string interfacePrefix: - description: 'InterfacePrefix is the interface name prefix that identifies - workload endpoints and so distinguishes them from host endpoint - interfaces. Note: in environments other than bare metal, the orchestrators - configure this appropriately. For example our Kubernetes and Docker - integrations set the ''cali'' value, and our OpenStack integration - sets the ''tap'' value. [Default: cali]' + description: |- + InterfacePrefix is the interface name prefix that identifies workload endpoints and so distinguishes + them from host endpoint interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. For example our Kubernetes and Docker integrations set the 'cali' value, + and our OpenStack integration sets the 'tap' value. [Default: cali] type: string interfaceRefreshInterval: - description: InterfaceRefreshInterval is the period at which Felix - rescans local interfaces to verify their state. The rescan can be - disabled by setting the interval to 0. + description: |- + InterfaceRefreshInterval is the period at which Felix rescans local interfaces to verify their state. + The rescan can be disabled by setting the interval to 0. + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + ipForwarding: + description: |- + IPForwarding controls whether Felix sets the host sysctls to enable IP forwarding. IP forwarding is required + when using Calico for workload networking. This should be disabled only on hosts where Calico is used solely for + host protection. In BPF mode, due to a kernel interaction, either IPForwarding must be enabled or BPFEnforceRPF + must be disabled. [Default: Enabled] + enum: + - Enabled + - Disabled type: string ipipEnabled: - description: 'IPIPEnabled overrides whether Felix should configure - an IPIP interface on the host. 
Optional as Felix determines this - based on the existing IP pools. [Default: nil (unset)]' + description: |- + IPIPEnabled overrides whether Felix should configure an IPIP interface on the host. Optional as Felix + determines this based on the existing IP pools. [Default: nil (unset)] type: boolean ipipMTU: - description: 'IPIPMTU is the MTU to set on the tunnel device. See - Configuring MTU [Default: 1440]' + description: |- + IPIPMTU controls the MTU to set on the IPIP tunnel device. Optional as Felix auto-detects the MTU based on the + MTU of the host's interfaces. [Default: 0 (auto-detect)] type: integer ipsetsRefreshInterval: - description: 'IpsetsRefreshInterval is the period at which Felix re-checks - all iptables state to ensure that no other process has accidentally - broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: - 90s]' + description: |- + IpsetsRefreshInterval controls the period at which Felix re-checks all IP sets to look for discrepancies. + Set to 0 to disable the periodic refresh. [Default: 90s] + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesBackend: - description: IptablesBackend specifies which backend of iptables will - be used. The default is legacy. + description: |- + IptablesBackend controls which backend of iptables will be used. The default is `Auto`. + + Warning: changing this on a running system can leave "orphaned" rules in the "other" backend. These + should be cleaned up to avoid confusing interactions. + pattern: ^(?i)(Auto|Legacy|NFT)?$ type: string iptablesFilterAllowAction: + description: |- + IptablesFilterAllowAction controls what happens to traffic that is accepted by a Felix policy chain in the + iptables filter table (which is used for "normal" policy). The default will immediately `Accept` the traffic. Use + `Return` to send the traffic back up to the system chains for further processing. + pattern: ^(?i)(Accept|Return)?$ + type: string + iptablesFilterDenyAction: + description: |- + IptablesFilterDenyAction controls what happens to traffic that is denied by network policy. By default Calico blocks traffic + with an iptables "DROP" action. If you want to use "REJECT" action instead you can configure it in here. + pattern: ^(?i)(Drop|Reject)?$ type: string iptablesLockFilePath: - description: 'IptablesLockFilePath is the location of the iptables - lock file. You may need to change this if the lock file is not in - its standard location (for example if you have mapped it into Felix''s - container at a different path). [Default: /run/xtables.lock]' + description: |- + IptablesLockFilePath is the location of the iptables lock file. You may need to change this + if the lock file is not in its standard location (for example if you have mapped it into Felix's + container at a different path). [Default: /run/xtables.lock] type: string iptablesLockProbeInterval: - description: 'IptablesLockProbeInterval is the time that Felix will - wait between attempts to acquire the iptables lock if it is not - available. Lower values make Felix more responsive when the lock - is contended, but use more CPU. [Default: 50ms]' + description: |- + IptablesLockProbeInterval when IptablesLockTimeout is enabled: the time that Felix will wait between + attempts to acquire the iptables lock if it is not available. Lower values make Felix more + responsive when the lock is contended, but use more CPU. 
[Default: 50ms] + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesLockTimeout: - description: 'IptablesLockTimeout is the time that Felix will wait - for the iptables lock, or 0, to disable. To use this feature, Felix - must share the iptables lock file with all other processes that - also take the lock. When running Felix inside a container, this - requires the /run directory of the host to be mounted into the calico/node - or calico/felix container. [Default: 0s disabled]' + description: |- + IptablesLockTimeout is the time that Felix itself will wait for the iptables lock (rather than delegating the + lock handling to the `iptables` command). + + Deprecated: `iptables-restore` v1.8+ always takes the lock, so enabling this feature results in deadlock. + [Default: 0s disabled] + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesMangleAllowAction: + description: |- + IptablesMangleAllowAction controls what happens to traffic that is accepted by a Felix policy chain in the + iptables mangle table (which is used for "pre-DNAT" policy). The default will immediately `Accept` the traffic. + Use `Return` to send the traffic back up to the system chains for further processing. + pattern: ^(?i)(Accept|Return)?$ type: string iptablesMarkMask: - description: 'IptablesMarkMask is the mask that Felix selects its - IPTables Mark bits from. Should be a 32 bit hexadecimal number with - at least 8 bits set, none of which clash with any other mark bits - in use on the system. [Default: 0xff000000]' + description: |- + IptablesMarkMask is the mask that Felix selects its IPTables Mark bits from. Should be a 32 bit hexadecimal + number with at least 8 bits set, none of which clash with any other mark bits in use on the system. + [Default: 0xffff0000] format: int32 type: integer iptablesNATOutgoingInterfaceFilter: + description: |- + This parameter can be used to limit the host interfaces on which Calico will apply SNAT to traffic leaving a + Calico IPAM pool with "NAT outgoing" enabled. This can be useful if you have a main data interface, where + traffic should be SNATted and a secondary device (such as the docker bridge) which is local to the host and + doesn't require SNAT. This parameter uses the iptables interface matching syntax, which allows + as a + wildcard. Most users will not need to set this. Example: if your data interfaces are eth0 and eth1 and you + want to exclude the docker bridge, you could set this to eth+ type: string iptablesPostWriteCheckInterval: - description: 'IptablesPostWriteCheckInterval is the period after Felix - has done a write to the dataplane that it schedules an extra read - back in order to check the write was not clobbered by another process. - This should only occur if another application on the system doesn''t - respect the iptables lock. [Default: 1s]' + description: |- + IptablesPostWriteCheckInterval is the period after Felix has done a write + to the dataplane that it schedules an extra read back in order to check the write was not + clobbered by another process. This should only occur if another application on the system + doesn't respect the iptables lock. [Default: 1s] + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesRefreshInterval: - description: 'IptablesRefreshInterval is the period at which Felix - re-checks the IP sets in the dataplane to ensure that no other process - has accidentally broken Calico''s rules. Set to 0 to disable IP - sets refresh. 
Note: the default for this value is lower than the - other refresh intervals as a workaround for a Linux kernel bug that - was fixed in kernel version 4.11. If you are using v4.11 or greater - you may want to set this to, a higher value to reduce Felix CPU - usage. [Default: 10s]' + description: |- + IptablesRefreshInterval is the period at which Felix re-checks the IP sets + in the dataplane to ensure that no other process has accidentally broken Calico's rules. + Set to 0 to disable IP sets refresh. Note: the default for this value is lower than the + other refresh intervals as a workaround for a Linux kernel bug that was fixed in kernel + version 4.11. If you are using v4.11 or greater you may want to set this to a higher value + to reduce Felix CPU usage. [Default: 10s] + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string ipv6Support: - description: IPv6Support controls whether Felix enables support for - IPv6 (if supported by the in-use dataplane). + description: IPv6Support controls whether Felix enables support for IPv6 (if supported by the in-use dataplane). type: boolean kubeNodePortRanges: - description: 'KubeNodePortRanges holds list of port ranges used for - service node ports. Only used if felix detects kube-proxy running - in ipvs mode. Felix uses these ranges to separate host and workload - traffic. [Default: 30000:32767].' + description: |- + KubeNodePortRanges holds list of port ranges used for service node ports. Only used if felix detects kube-proxy running in ipvs mode. + Felix uses these ranges to separate host and workload traffic. [Default: 30000:32767]. items: anyOf: - type: integer @@ -1169,143 +1825,189 @@ spec: - type: string pattern: ^.* x-kubernetes-int-or-string: true type: array logDebugFilenameRegex: - description: LogDebugFilenameRegex controls which source code files - have their Debug log output included in the logs. Only logs from - files with names that match the given regular expression are included. The - filter only applies to Debug level logs. + description: |- + LogDebugFilenameRegex controls which source code files have their Debug log output included in the logs. + Only logs from files with names that match the given regular expression are included. The filter only applies + to Debug level logs. type: string logFilePath: - description: 'LogFilePath is the full path to the Felix log. Set to - none to disable file logging. [Default: /var/log/calico/felix.log]' + description: 'LogFilePath is the full path to the Felix log. Set to none to disable file logging. [Default: /var/log/calico/felix.log]' type: string logPrefix: - description: 'LogPrefix is the log prefix that Felix uses when rendering - LOG rules. [Default: calico-packet]' + description: 'LogPrefix is the log prefix that Felix uses when rendering LOG rules. [Default: calico-packet]' type: string logSeverityFile: - description: 'LogSeverityFile is the log severity above which logs - are sent to the log file. [Default: Info]' + description: 'LogSeverityFile is the log severity above which logs are sent to the log file. [Default: Info]' + pattern: ^(?i)(Trace|Debug|Info|Warning|Error|Fatal)?$ type: string logSeverityScreen: - description: 'LogSeverityScreen is the log severity above which logs - are sent to the stdout.
[Default: Info]' + pattern: ^(?i)(Trace|Debug|Info|Warning|Error|Fatal)?$ type: string logSeveritySys: - description: 'LogSeveritySys is the log severity above which logs - are sent to the syslog. Set to None for no logging to syslog. [Default: - Info]' + description: |- + LogSeveritySys is the log severity above which logs are sent to the syslog. Set to None for no logging to syslog. + [Default: Info] + pattern: ^(?i)(Trace|Debug|Info|Warning|Error|Fatal)?$ type: string maxIpsetSize: + description: |- + MaxIpsetSize is the maximum number of IP addresses that can be stored in an IP set. Not applicable + if using the nftables backend. type: integer metadataAddr: - description: 'MetadataAddr is the IP address or domain name of the - server that can answer VM queries for cloud-init metadata. In OpenStack, - this corresponds to the machine running nova-api (or in Ubuntu, - nova-api-metadata). A value of none (case insensitive) means that - Felix should not set up any NAT rule for the metadata path. [Default: - 127.0.0.1]' + description: |- + MetadataAddr is the IP address or domain name of the server that can answer VM queries for + cloud-init metadata. In OpenStack, this corresponds to the machine running nova-api (or in + Ubuntu, nova-api-metadata). A value of none (case-insensitive) means that Felix should not + set up any NAT rule for the metadata path. [Default: 127.0.0.1] type: string metadataPort: - description: 'MetadataPort is the port of the metadata server. This, - combined with global.MetadataAddr (if not ''None''), is used to - set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. - In most cases this should not need to be changed [Default: 8775].' + description: |- + MetadataPort is the port of the metadata server. This, combined with global.MetadataAddr (if + not 'None'), is used to set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775]. type: integer mtuIfacePattern: - description: MTUIfacePattern is a regular expression that controls - which interfaces Felix should scan in order to calculate the host's - MTU. This should not match workload interfaces (usually named cali...). + description: |- + MTUIfacePattern is a regular expression that controls which interfaces Felix should scan in order + to calculate the host's MTU. + This should not match workload interfaces (usually named cali...). type: string natOutgoingAddress: - description: NATOutgoingAddress specifies an address to use when performing - source NAT for traffic in a natOutgoing pool that is leaving the - network. By default the address used is an address on the interface - the traffic is leaving on (ie it uses the iptables MASQUERADE target) + description: |- + NATOutgoingAddress specifies an address to use when performing source NAT for traffic in a natOutgoing pool that + is leaving the network. By default the address used is an address on the interface the traffic is leaving on + (i.e. it uses the iptables MASQUERADE target). type: string natPortRange: anyOf: - type: integer - type: string - description: NATPortRange specifies the range of ports that is used - for port mapping when doing outgoing NAT. When unset the default - behavior of the network stack is used. + description: |- + NATPortRange specifies the range of ports that is used for port mapping when doing outgoing NAT. When unset the default behavior of the + network stack is used. 
pattern: ^.* x-kubernetes-int-or-string: true netlinkTimeout: + description: |- + NetlinkTimeout is the timeout when talking to the kernel over the netlink protocol, used for programming + routes, rules, and other kernel objects. [Default: 10s] + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + nftablesFilterAllowAction: + description: |- + NftablesFilterAllowAction controls the nftables action that Felix uses to represent the "allow" policy verdict + in the filter table. The default is to `ACCEPT` the traffic, which is a terminal action. Alternatively, + `RETURN` can be used to return the traffic back to the top-level chain for further processing by your rules. + pattern: ^(?i)(Accept|Return)?$ + type: string + nftablesFilterDenyAction: + description: |- + NftablesFilterDenyAction controls what happens to traffic that is denied by network policy. By default, Calico + blocks traffic with a "drop" action. If you want to use a "reject" action instead you can configure it here. + pattern: ^(?i)(Drop|Reject)?$ + type: string + nftablesMangleAllowAction: + description: |- + NftablesMangleAllowAction controls the nftables action that Felix uses to represent the "allow" policy verdict + in the mangle table. The default is to `ACCEPT` the traffic, which is a terminal action. Alternatively, + `RETURN` can be used to return the traffic back to the top-level chain for further processing by your rules. + pattern: ^(?i)(Accept|Return)?$ + type: string + nftablesMarkMask: + description: |- + NftablesMarkMask is the mask that Felix selects its nftables Mark bits from. Should be a 32 bit hexadecimal + number with at least 8 bits set, none of which clash with any other mark bits in use on the system. + [Default: 0xffff0000] + format: int32 + type: integer + nftablesMode: + description: 'NFTablesMode configures nftables support in Felix. [Default: Disabled]' + enum: + - Disabled + - Enabled + - Auto + type: string + nftablesRefreshInterval: + description: 'NftablesRefreshInterval controls the interval at which Felix periodically refreshes the nftables rules. [Default: 90s]' type: string openstackRegion: - description: 'OpenstackRegion is the name of the region that a particular - Felix belongs to. In a multi-region Calico/OpenStack deployment, - this must be configured somehow for each Felix (here in the datamodel, - or in felix.cfg or the environment on each compute node), and must - match the [calico] openstack_region value configured in neutron.conf - on each node. [Default: Empty]' + description: |- + OpenstackRegion is the name of the region that a particular Felix belongs to. In a multi-region + Calico/OpenStack deployment, this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must match the [calico] + openstack_region value configured in neutron.conf on each node. [Default: Empty] type: string policySyncPathPrefix: - description: 'PolicySyncPathPrefix is used by Felix to communicate - policy changes to external services, like Application layer policy. - [Default: Empty]' + description: |- + PolicySyncPathPrefix is used by Felix to communicate policy changes to external services, + like Application layer policy. [Default: Empty] type: string prometheusGoMetricsEnabled: - description: 'PrometheusGoMetricsEnabled disables Go runtime metrics - collection, which the Prometheus client does by default, when set - to false. This reduces the number of metrics reported, reducing - Prometheus load.
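# Minimal sketch exercising the new nftables fields added above; all values
# are illustrative. "Enabled" forces the nftables backend on, and `Return`
# hands allowed traffic back to the top-level chain instead of terminally
# accepting it.
apiVersion: crd.projectcalico.org/v1
kind: FelixConfiguration
metadata:
  name: default
spec:
  nftablesMode: Enabled
  nftablesFilterAllowAction: Return
  nftablesFilterDenyAction: Reject
  nftablesRefreshInterval: 90s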
[Default: true]' + description: |- + PrometheusGoMetricsEnabled disables Go runtime metrics collection, which the Prometheus client does by default, when + set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true] type: boolean prometheusMetricsEnabled: - description: 'PrometheusMetricsEnabled enables the Prometheus metrics - server in Felix if set to true. [Default: false]' + description: 'PrometheusMetricsEnabled enables the Prometheus metrics server in Felix if set to true. [Default: false]' type: boolean prometheusMetricsHost: - description: 'PrometheusMetricsHost is the host that the Prometheus - metrics server should bind to. [Default: empty]' + description: 'PrometheusMetricsHost is the host that the Prometheus metrics server should bind to. [Default: empty]' type: string prometheusMetricsPort: - description: 'PrometheusMetricsPort is the TCP port that the Prometheus - metrics server should bind to. [Default: 9091]' + description: 'PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. [Default: 9091]' type: integer prometheusProcessMetricsEnabled: - description: 'PrometheusProcessMetricsEnabled disables process metrics - collection, which the Prometheus client does by default, when set - to false. This reduces the number of metrics reported, reducing - Prometheus load. [Default: true]' + description: |- + PrometheusProcessMetricsEnabled disables process metrics collection, which the Prometheus client does by default, when + set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true] type: boolean prometheusWireGuardMetricsEnabled: - description: 'PrometheusWireGuardMetricsEnabled disables wireguard - metrics collection, which the Prometheus client does by default, - when set to false. This reduces the number of metrics reported, - reducing Prometheus load. [Default: true]' + description: |- + PrometheusWireGuardMetricsEnabled disables wireguard metrics collection, which the Prometheus client does by default, when + set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true] type: boolean removeExternalRoutes: - description: Whether or not to remove device routes that have not - been programmed by Felix. Disabling this will allow external applications - to also add device routes. This is enabled by default which means - we will remove externally added routes. + description: |- + RemoveExternalRoutes controls whether Felix will remove unexpected routes to workload interfaces. Felix will + always clean up expected routes that use the configured DeviceRouteProtocol. To add your own routes, you must + use a distinct protocol (in addition to setting this field to false). type: boolean reportingInterval: - description: 'ReportingInterval is the interval at which Felix reports - its status into the datastore or 0 to disable. Must be non-zero - in OpenStack deployments. [Default: 30s]' + description: |- + ReportingInterval is the interval at which Felix reports its status into the datastore or 0 to disable. + Must be non-zero in OpenStack deployments. [Default: 30s] + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string reportingTTL: - description: 'ReportingTTL is the time-to-live setting for process-wide - status reports.
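# Sketch of the Prometheus and reporting knobs above (illustrative values).
# Disabling the Go and process collectors trims the number of exported
# series, which the descriptions note reduces Prometheus load.
apiVersion: crd.projectcalico.org/v1
kind: FelixConfiguration
metadata:
  name: default
spec:
  prometheusMetricsEnabled: true
  prometheusMetricsPort: 9091
  prometheusGoMetricsEnabled: false
  prometheusProcessMetricsEnabled: false
  reportingInterval: 30s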
[Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string routeRefreshInterval: - description: 'RouteRefreshInterval is the period at which Felix re-checks - the routes in the dataplane to ensure that no other process has - accidentally broken Calico''s rules. Set to 0 to disable route refresh. - [Default: 90s]' + description: |- + RouteRefreshInterval is the period at which Felix re-checks the routes + in the dataplane to ensure that no other process has accidentally broken Calico's rules. + Set to 0 to disable route refresh. [Default: 90s] + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string routeSource: - description: 'RouteSource configures where Felix gets its routing - information. - WorkloadIPs: use workload endpoints to construct - routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + description: |- + RouteSource configures where Felix gets its routing information. + - WorkloadIPs: use workload endpoints to construct routes. + - CalicoIPAM: the default - use IPAM data to construct routes. + pattern: ^(?i)(WorkloadIPs|CalicoIPAM)?$ type: string + routeSyncDisabled: + description: |- + RouteSyncDisabled will disable all operations performed on the route table. Set to true to + run in network-policy mode only. + type: boolean routeTableRange: - description: Deprecated in favor of RouteTableRanges. Calico programs - additional Linux route tables for various purposes. RouteTableRange - specifies the indices of the route tables that Calico should use. + description: |- + Deprecated in favor of RouteTableRanges. + Calico programs additional Linux route tables for various purposes. + RouteTableRange specifies the indices of the route tables that Calico should use. properties: max: type: integer @@ -1316,9 +2018,10 @@ spec: - min type: object routeTableRanges: - description: Calico programs additional Linux route tables for various - purposes. RouteTableRanges specifies a set of table index ranges - that Calico should use. Deprecates `RouteTableRange`, overrides `RouteTableRange`. + description: |- + Calico programs additional Linux route tables for various purposes. + RouteTableRanges specifies a set of table index ranges that Calico should use. + Deprecates `RouteTableRange`, overrides `RouteTableRange`. items: properties: max: @@ -1331,112 +2034,133 @@ spec: type: object type: array serviceLoopPrevention: - description: 'When service IP advertisement is enabled, prevent routing - loops to service IPs that are not in use, by dropping or rejecting - packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", - in which case such routing loops continue to be allowed. [Default: - Drop]' + description: |- + When service IP advertisement is enabled, prevent routing loops to service IPs that are + not in use, by dropping or rejecting packets that do not get DNAT'd by kube-proxy. + Unless set to "Disabled", in which case such routing loops continue to be allowed. + [Default: Drop] + pattern: ^(?i)(Drop|Reject|Disabled)?$ type: string sidecarAccelerationEnabled: - description: 'SidecarAccelerationEnabled enables experimental sidecar - acceleration [Default: false]' + description: 'SidecarAccelerationEnabled enables experimental sidecar acceleration [Default: false]' type: boolean usageReportingEnabled: - description: 'UsageReportingEnabled reports anonymous Calico version - number and cluster size to projectcalico.org. Logs warnings returned - by the usage server.
For example, if a significant security vulnerability - has been discovered in the version of Calico being used. [Default: - true]' + description: |- + UsageReportingEnabled reports anonymous Calico version number and cluster size to projectcalico.org. Logs warnings returned by the usage + server. For example, if a significant security vulnerability has been discovered in the version of Calico being used. [Default: true] type: boolean usageReportingInitialDelay: - description: 'UsageReportingInitialDelay controls the minimum delay - before Felix makes a report. [Default: 300s]' + description: 'UsageReportingInitialDelay controls the minimum delay before Felix makes a report. [Default: 300s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string usageReportingInterval: - description: 'UsageReportingInterval controls the interval at which - Felix makes reports. [Default: 86400s]' + description: 'UsageReportingInterval controls the interval at which Felix makes reports. [Default: 86400s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string useInternalDataplaneDriver: - description: UseInternalDataplaneDriver, if true, Felix will use its - internal dataplane programming logic. If false, it will launch - an external dataplane driver and communicate with it over protobuf. + description: |- + UseInternalDataplaneDriver, if true, Felix will use its internal dataplane programming logic. If false, it + will launch an external dataplane driver and communicate with it over protobuf. type: boolean vxlanEnabled: - description: 'VXLANEnabled overrides whether Felix should create the - VXLAN tunnel device for VXLAN networking. Optional as Felix determines - this based on the existing IP pools. [Default: nil (unset)]' + description: |- + VXLANEnabled overrides whether Felix should create the VXLAN tunnel device for IPv4 VXLAN networking. + Optional as Felix determines this based on the existing IP pools. [Default: nil (unset)] type: boolean vxlanMTU: - description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel - device. See Configuring MTU [Default: 1410]' + description: |- + VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel device. Optional as Felix auto-detects the MTU based on the + MTU of the host's interfaces. [Default: 0 (auto-detect)] type: integer vxlanMTUV6: - description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel - device. See Configuring MTU [Default: 1390]' + description: |- + VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel device. Optional as Felix auto-detects the MTU based on the + MTU of the host's interfaces. [Default: 0 (auto-detect)] type: integer vxlanPort: + description: 'VXLANPort is the UDP port number to use for VXLAN traffic. [Default: 4789]' type: integer vxlanVNI: + description: |- + VXLANVNI is the VXLAN VNI to use for VXLAN traffic. You may need to change this if the default value is + in use on your system. [Default: 4096] type: integer + windowsManageFirewallRules: + description: 'WindowsManageFirewallRules configures whether or not Felix will program Windows Firewall rules (to allow inbound access to its own metrics ports). [Default: Disabled]' + enum: + - Enabled + - Disabled + type: string wireguardEnabled: - description: 'WireguardEnabled controls whether Wireguard is enabled. - [Default: false]' + description: 'WireguardEnabled controls whether Wireguard is enabled for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network). 
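# Sketch combining the routing and VXLAN fields above. routeTableRanges is a
# list of {min, max} objects; the table indices are illustrative assumptions,
# while 4789 and 4096 are the documented VXLAN defaults.
apiVersion: crd.projectcalico.org/v1
kind: FelixConfiguration
metadata:
  name: default
spec:
  routeSource: CalicoIPAM
  routeTableRanges:
  - min: 65
    max: 99
  vxlanPort: 4789
  vxlanVNI: 4096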
[Default: false]' + type: boolean + wireguardEnabledV6: + description: 'WireguardEnabledV6 controls whether Wireguard is enabled for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network). [Default: false]' type: boolean wireguardHostEncryptionEnabled: - description: 'WireguardHostEncryptionEnabled controls whether Wireguard - host-to-host encryption is enabled. [Default: false]' + description: 'WireguardHostEncryptionEnabled controls whether Wireguard host-to-host encryption is enabled. [Default: false]' type: boolean wireguardInterfaceName: - description: 'WireguardInterfaceName specifies the name to use for - the Wireguard interface. [Default: wg.calico]' + description: 'WireguardInterfaceName specifies the name to use for the IPv4 Wireguard interface. [Default: wireguard.cali]' + type: string + wireguardInterfaceNameV6: + description: 'WireguardInterfaceNameV6 specifies the name to use for the IPv6 Wireguard interface. [Default: wg-v6.cali]' type: string wireguardKeepAlive: - description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive - option. Set 0 to disable. [Default: 0]' + description: 'WireguardPersistentKeepAlive controls Wireguard PersistentKeepalive option. Set 0 to disable. [Default: 0]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string wireguardListeningPort: - description: 'WireguardListeningPort controls the listening port used - by Wireguard. [Default: 51820]' + description: 'WireguardListeningPort controls the listening port used by IPv4 Wireguard. [Default: 51820]' + type: integer + wireguardListeningPortV6: + description: 'WireguardListeningPortV6 controls the listening port used by IPv6 Wireguard. [Default: 51821]' type: integer wireguardMTU: - description: 'WireguardMTU controls the MTU on the Wireguard interface. - See Configuring MTU [Default: 1420]' + description: 'WireguardMTU controls the MTU on the IPv4 Wireguard interface. See Configuring MTU [Default: 1440]' + type: integer + wireguardMTUV6: + description: 'WireguardMTUV6 controls the MTU on the IPv6 Wireguard interface. See Configuring MTU [Default: 1420]' + type: integer wireguardRoutingRulePriority: - description: 'WireguardRoutingRulePriority controls the priority value - to use for the Wireguard routing rule. [Default: 99]' + description: 'WireguardRoutingRulePriority controls the priority value to use for the Wireguard routing rule. [Default: 99]' type: integer + wireguardThreadingEnabled: + description: |- + WireguardThreadingEnabled controls whether Wireguard has Threaded NAPI enabled. [Default: false] + This increases the maximum number of packets a Wireguard interface can process. + Consider threaded NAPI only if you have high packets per second workloads that are causing dropped packets due to a saturated `softirq` CPU core. + There is a [known issue](https://lore.kernel.org/netdev/CALrw=nEoT2emQ0OAYCjM1d_6Xe_kNLSZ6dhjb5FxrLFYh4kozA@mail.gmail.com/T/) with this setting + that may cause NAPI to get stuck holding the global `rtnl_mutex` when a peer is removed. + Workaround: Make sure your Linux kernel [includes this patch](https://github.com/torvalds/linux/commit/56364c910691f6d10ba88c964c9041b9ab777bd6) to unwedge NAPI. + type: boolean workloadSourceSpoofing: - description: WorkloadSourceSpoofing controls whether pods can use - the allowedSourcePrefixes annotation to send traffic with a source - IP address that is not theirs. This is disabled by default. When - set to "Any", pods can request any prefix.
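# Sketch of the WireGuard fields above, using the documented default ports.
# wireguardThreadingEnabled is left at its false default because of the NAPI
# issue called out in its description; all other values are illustrative.
apiVersion: crd.projectcalico.org/v1
kind: FelixConfiguration
metadata:
  name: default
spec:
  wireguardEnabled: true          # IPv4 traffic over an IPv4 underlay
  wireguardEnabledV6: true        # IPv6 traffic over an IPv6 underlay
  wireguardListeningPort: 51820
  wireguardListeningPortV6: 51821
  wireguardThreadingEnabled: false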
+ description: |- + WorkloadSourceSpoofing controls whether pods can use the allowedSourcePrefixes annotation to send traffic with a source IP + address that is not theirs. This is disabled by default. When set to "Any", pods can request any prefix. + pattern: ^(?i)(Disabled|Any)?$ type: string xdpEnabled: - description: 'XDPEnabled enables XDP acceleration for suitable untracked - incoming deny rules. [Default: true]' + description: 'XDPEnabled enables XDP acceleration for suitable untracked incoming deny rules. [Default: true]' type: boolean xdpRefreshInterval: - description: 'XDPRefreshInterval is the period at which Felix re-checks - all XDP state to ensure that no other process has accidentally broken - Calico''s BPF maps or attached programs. Set to 0 to disable XDP - refresh. [Default: 90s]' + description: |- + XDPRefreshInterval is the period at which Felix re-checks all XDP state to ensure that no + other process has accidentally broken Calico's BPF maps or attached programs. Set to 0 to + disable XDP refresh. [Default: 90s] + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string type: object type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: globalnetworkpolicies.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -1445,6 +2169,7 @@ spec: listKind: GlobalNetworkPolicyList plural: globalnetworkpolicies singular: globalnetworkpolicy + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -1452,84 +2177,86 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: properties: applyOnForward: - description: ApplyOnForward indicates to apply the rules in this policy - on forward traffic. + description: ApplyOnForward indicates to apply the rules in this policy on forward traffic. type: boolean doNotTrack: - description: DoNotTrack indicates whether packets matched by the rules - in this policy should go through the data plane's connection tracking, - such as Linux conntrack. 
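# Minimal GlobalNetworkPolicy sketch against the schema that follows
# (crd.projectcalico.org/v1). The name, selector, and rule are illustrative;
# `selector` is part of the full spec even though it appears later in this
# CRD than the fields shown here.
apiVersion: crd.projectcalico.org/v1
kind: GlobalNetworkPolicy
metadata:
  name: allow-dns-egress
spec:
  applyOnForward: true
  selector: all()
  egress:
  - action: Allow
    protocol: UDP
    destination:
      ports:
      - 53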
If True, the rules in this policy are - applied before any data plane connection tracking, and packets allowed - by this policy are marked as not to be tracked. + description: |- + DoNotTrack indicates whether packets matched by the rules in this policy should go through + the data plane's connection tracking, such as Linux conntrack. If True, the rules in + this policy are applied before any data plane connection tracking, and packets allowed by + this policy are marked as not to be tracked. type: boolean egress: - description: The ordered set of egress rules. Each rule contains - a set of packet match criteria and a corresponding action to apply. + description: |- + The ordered set of egress rules. Each rule contains a set of packet match criteria and + a corresponding action to apply. items: - description: "A Rule encapsulates a set of match criteria and an - action. Both selector-based security Policy and security Profiles - reference rules - separated out as a list of rules for both ingress - and egress packet matching. \n Each positive match criteria has - a negated version, prefixed with \"Not\". All the match criteria - within a rule must be satisfied for a packet to match. A single - rule can contain the positive and negative version of a match - and both must be satisfied for the rule to match." + description: |- + A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy + and security Profiles reference rules - separated out as a list of rules for both + ingress and egress packet matching. + + Each positive match criteria has a negated version, prefixed with "Not". All the match + criteria within a rule must be satisfied for a packet to match. A single rule can contain + the positive and negative version of a match and both must be satisfied for the rule to match. properties: action: type: string destination: - description: Destination contains the match criteria that apply - to destination entity. + description: Destination contains the match criteria that apply to destination entity. properties: namespaceSelector: - description: "NamespaceSelector is an optional field that - contains a selector expression. Only traffic that originates - from (or terminates at) endpoints within the selected - namespaces will be matched. When both NamespaceSelector - and another selector are defined on the same rule, then - only workload endpoints that are matched by both selectors - will be selected by the rule. \n For NetworkPolicy, an - empty NamespaceSelector implies that the Selector is limited - to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, `global()` - NamespaceSelector implies that the Selector is limited - to selecting only GlobalNetworkSet or HostEndpoint. \n - For GlobalNetworkPolicy, an empty NamespaceSelector implies - the Selector applies to workload endpoints across all - namespaces." + description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. 
+ + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. type: string nets: - description: Nets is an optional field that restricts the - rule to only apply to traffic that originates from (or - terminates at) IP addresses in any of the given subnets. + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: - description: NotNets is the negated version of the Nets - field. + description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: - description: NotPorts is the negated version of the Ports - field. Since only some protocols have ports, if any ports - are specified it requires the Protocol match in the Rule - to be set to "TCP" or "UDP". + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -1538,18 +2265,18 @@ spec: x-kubernetes-int-or-string: true type: array notSelector: - description: NotSelector is the negated version of the Selector - field. See Selector field for subtleties with negated - selectors. + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. type: string ports: - description: "Ports is an optional field that restricts - the rule to only apply to traffic that has a source (destination) - port that matches one of these ranges/values. This value - is a list of integers or strings that represent ranges - of ports. \n Since only some protocols have ports, if - any ports are specified it requires the Protocol match - in the Rule to be set to \"TCP\" or \"UDP\"." + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -1558,91 +2285,73 @@ spec: x-kubernetes-int-or-string: true type: array selector: - description: "Selector is an optional field that contains - a selector expression (see Policy for sample syntax). - \ Only traffic that originates from (terminates at) endpoints - matching the selector will be matched. \n Note that: in - addition to the negated version of the Selector (see NotSelector - below), the selector expression syntax itself supports - negation. The two types of negation are subtly different. - One negates the set of matched endpoints, the other negates - the whole match: \n \tSelector = \"!has(my_label)\" matches - packets that are from other Calico-controlled \tendpoints - that do not have the label \"my_label\". \n \tNotSelector - = \"has(my_label)\" matches packets that are not from - Calico-controlled \tendpoints that do have the label \"my_label\". 
- \n The effect is that the latter will accept packets from - non-Calico sources whereas the former is limited to packets - from Calico-controlled endpoints." + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: - description: ServiceAccounts is an optional field that restricts - the rule to only apply to traffic that originates from - (or terminates at) a pod running as a matching service - account. + description: |- + ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. properties: names: - description: Names is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account whose name is in the list. + description: |- + Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates + at) a pod running as a service account whose name is in the list. items: type: string type: array selector: - description: Selector is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account that matches the given label selector. If - both Names and Selector are specified then they are - AND'ed. + description: |- + Selector is an optional field that restricts the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account that matches the given label selector. + If both Names and Selector are specified then they are AND'ed. type: string type: object services: - description: "Services is an optional field that contains - options for matching Kubernetes Services. If specified, - only traffic that originates from or terminates at endpoints - within the selected service(s) will be matched, and only - to/from each endpoint's port. \n Services cannot be specified - on the same rule as Selector, NotSelector, NamespaceSelector, - Nets, NotNets or ServiceAccounts. \n Ports and NotPorts - can only be specified with Services on ingress rules." + description: |- + Services is an optional field that contains options for matching Kubernetes Services. + If specified, only traffic that originates from or terminates at endpoints within the selected + service(s) will be matched, and only to/from each endpoint's port. + + Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + NotNets or ServiceAccounts. + + Ports and NotPorts can only be specified with Services on ingress rules. 
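# Sketch of the Services match described above: an egress rule fragment that
# allows traffic to the endpoints of a single Kubernetes Service. Names are
# illustrative; per the description, Ports/NotPorts combine with Services
# only on ingress rules.
egress:
- action: Allow
  destination:
    services:
      name: kube-dns
      namespace: kube-system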
properties: name: - description: Name specifies the name of a Kubernetes - Service to match. + description: Name specifies the name of a Kubernetes Service to match. type: string namespace: - description: Namespace specifies the namespace of the - given Service. If left empty, the rule will match - within this policy's namespace. + description: |- + Namespace specifies the namespace of the given Service. If left empty, the rule + will match within this policy's namespace. type: string type: object type: object http: - description: HTTP contains match criteria that apply to HTTP - requests. + description: HTTP contains match criteria that apply to HTTP requests. properties: methods: - description: Methods is an optional field that restricts - the rule to apply only to HTTP requests that use one of - the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple - methods are OR'd together. + description: |- + Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed + HTTP Methods (e.g. GET, PUT, etc.) + Multiple methods are OR'd together. items: type: string type: array paths: - description: 'Paths is an optional field that restricts - the rule to apply to HTTP requests that use one of the - listed HTTP Paths. Multiple paths are OR''d together. - e.g: - exact: /foo - prefix: /bar NOTE: Each entry may - ONLY specify either a `exact` or a `prefix` match. The - validator will check for it.' + description: |- + Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed + HTTP Paths. + Multiple paths are OR'd together. + e.g: + - exact: /foo + - prefix: /bar + NOTE: Each entry may ONLY specify either a `exact` or a `prefix` match. The validator will check for it. items: - description: 'HTTPPath specifies an HTTP path to match. - It may be either of the form: exact: : which matches - the path exactly or prefix: : which matches - the path prefix' + description: |- + HTTPPath specifies an HTTP path to match. It may be either of the form: + exact: : which matches the path exactly or + prefix: : which matches the path prefix properties: exact: type: string @@ -1652,110 +2361,108 @@ spec: type: array type: object icmp: - description: ICMP is an optional field that restricts the rule - to apply to a specific type and code of ICMP traffic. This - should only be specified if the Protocol field is set to "ICMP" - or "ICMPv6". + description: |- + ICMP is an optional field that restricts the rule to apply to a specific type and + code of ICMP traffic. This should only be specified if the Protocol field is set to + "ICMP" or "ICMPv6". properties: code: - description: Match on a specific ICMP code. If specified, - the Type value must also be specified. This is a technical - limitation imposed by the kernel's iptables firewall, - which Calico uses to enforce the rule. + description: |- + Match on a specific ICMP code. If specified, the Type value must also be specified. + This is a technical limitation imposed by the kernel's iptables firewall, which + Calico uses to enforce the rule. type: integer type: - description: Match on a specific ICMP type. For example - a value of 8 refers to ICMP Echo Request (i.e. pings). + description: |- + Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + (i.e. pings). type: integer type: object ipVersion: - description: IPVersion is an optional field that restricts the - rule to only match a specific IP version. 
+ description: |- + IPVersion is an optional field that restricts the rule to only match a specific IP + version. type: integer metadata: - description: Metadata contains additional information for this - rule + description: Metadata contains additional information for this rule properties: annotations: additionalProperties: type: string - description: Annotations is a set of key value pairs that - give extra information about the rule + description: Annotations is a set of key value pairs that give extra information about the rule type: object type: object notICMP: description: NotICMP is the negated version of the ICMP field. properties: code: - description: Match on a specific ICMP code. If specified, - the Type value must also be specified. This is a technical - limitation imposed by the kernel's iptables firewall, - which Calico uses to enforce the rule. + description: |- + Match on a specific ICMP code. If specified, the Type value must also be specified. + This is a technical limitation imposed by the kernel's iptables firewall, which + Calico uses to enforce the rule. type: integer type: - description: Match on a specific ICMP type. For example - a value of 8 refers to ICMP Echo Request (i.e. pings). + description: |- + Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + (i.e. pings). type: integer type: object notProtocol: anyOf: - type: integer - type: string - description: NotProtocol is the negated version of the Protocol - field. + description: NotProtocol is the negated version of the Protocol field. pattern: ^.* x-kubernetes-int-or-string: true protocol: anyOf: - type: integer - type: string - description: "Protocol is an optional field that restricts the - rule to only apply to traffic of a specific IP protocol. Required - if any of the EntityRules contain Ports (because ports only - apply to certain protocols). \n Must be one of these string - values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", - \"UDPLite\" or an integer in the range 1-255." + description: |- + Protocol is an optional field that restricts the rule to only apply to traffic of + a specific IP protocol. Required if any of the EntityRules contain Ports + (because ports only apply to certain protocols). + + Must be one of these string values: "TCP", "UDP", "ICMP", "ICMPv6", "SCTP", "UDPLite" + or an integer in the range 1-255. pattern: ^.* x-kubernetes-int-or-string: true source: - description: Source contains the match criteria that apply to - source entity. + description: Source contains the match criteria that apply to source entity. properties: namespaceSelector: - description: "NamespaceSelector is an optional field that - contains a selector expression. Only traffic that originates - from (or terminates at) endpoints within the selected - namespaces will be matched. When both NamespaceSelector - and another selector are defined on the same rule, then - only workload endpoints that are matched by both selectors - will be selected by the rule. \n For NetworkPolicy, an - empty NamespaceSelector implies that the Selector is limited - to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, `global()` - NamespaceSelector implies that the Selector is limited - to selecting only GlobalNetworkSet or HostEndpoint. \n - For GlobalNetworkPolicy, an empty NamespaceSelector implies - the Selector applies to workload endpoints across all - namespaces." 
+ description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. type: string nets: - description: Nets is an optional field that restricts the - rule to only apply to traffic that originates from (or - terminates at) IP addresses in any of the given subnets. + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: - description: NotNets is the negated version of the Nets - field. + description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: - description: NotPorts is the negated version of the Ports - field. Since only some protocols have ports, if any ports - are specified it requires the Protocol match in the Rule - to be set to "TCP" or "UDP". + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -1764,18 +2471,18 @@ spec: x-kubernetes-int-or-string: true type: array notSelector: - description: NotSelector is the negated version of the Selector - field. See Selector field for subtleties with negated - selectors. + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. type: string ports: - description: "Ports is an optional field that restricts - the rule to only apply to traffic that has a source (destination) - port that matches one of these ranges/values. This value - is a list of integers or strings that represent ranges - of ports. \n Since only some protocols have ports, if - any ports are specified it requires the Protocol match - in the Rule to be set to \"TCP\" or \"UDP\"." + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -1784,64 +2491,45 @@ spec: x-kubernetes-int-or-string: true type: array selector: - description: "Selector is an optional field that contains - a selector expression (see Policy for sample syntax). - \ Only traffic that originates from (terminates at) endpoints - matching the selector will be matched. \n Note that: in - addition to the negated version of the Selector (see NotSelector - below), the selector expression syntax itself supports - negation. 
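# The two negation forms described above, as rule fragments (illustrative):
source:
  selector: "!has(my_label)"   # only Calico-controlled endpoints lacking the label
# versus negating the whole match, which also admits non-Calico sources:
# source:
#   notSelector: "has(my_label)"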
The two types of negation are subtly different. - One negates the set of matched endpoints, the other negates - the whole match: \n \tSelector = \"!has(my_label)\" matches - packets that are from other Calico-controlled \tendpoints - that do not have the label \"my_label\". \n \tNotSelector - = \"has(my_label)\" matches packets that are not from - Calico-controlled \tendpoints that do have the label \"my_label\". - \n The effect is that the latter will accept packets from - non-Calico sources whereas the former is limited to packets - from Calico-controlled endpoints." + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: - description: ServiceAccounts is an optional field that restricts - the rule to only apply to traffic that originates from - (or terminates at) a pod running as a matching service - account. + description: |- + ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. properties: names: - description: Names is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account whose name is in the list. + description: |- + Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates + at) a pod running as a service account whose name is in the list. items: type: string type: array selector: - description: Selector is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account that matches the given label selector. If - both Names and Selector are specified then they are - AND'ed. + description: |- + Selector is an optional field that restricts the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account that matches the given label selector. + If both Names and Selector are specified then they are AND'ed. type: string type: object services: - description: "Services is an optional field that contains - options for matching Kubernetes Services. If specified, - only traffic that originates from or terminates at endpoints - within the selected service(s) will be matched, and only - to/from each endpoint's port. \n Services cannot be specified - on the same rule as Selector, NotSelector, NamespaceSelector, - Nets, NotNets or ServiceAccounts. \n Ports and NotPorts - can only be specified with Services on ingress rules." 
+ description: |- + Services is an optional field that contains options for matching Kubernetes Services. + If specified, only traffic that originates from or terminates at endpoints within the selected + service(s) will be matched, and only to/from each endpoint's port. + + Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + NotNets or ServiceAccounts. + + Ports and NotPorts can only be specified with Services on ingress rules. properties: name: - description: Name specifies the name of a Kubernetes - Service to match. + description: Name specifies the name of a Kubernetes Service to match. type: string namespace: - description: Namespace specifies the namespace of the - given Service. If left empty, the rule will match - within this policy's namespace. + description: |- + Namespace specifies the namespace of the given Service. If left empty, the rule + will match within this policy's namespace. type: string type: object type: object @@ -1850,59 +2538,57 @@ spec: type: object type: array ingress: - description: The ordered set of ingress rules. Each rule contains - a set of packet match criteria and a corresponding action to apply. + description: |- + The ordered set of ingress rules. Each rule contains a set of packet match criteria and + a corresponding action to apply. items: - description: "A Rule encapsulates a set of match criteria and an - action. Both selector-based security Policy and security Profiles - reference rules - separated out as a list of rules for both ingress - and egress packet matching. \n Each positive match criteria has - a negated version, prefixed with \"Not\". All the match criteria - within a rule must be satisfied for a packet to match. A single - rule can contain the positive and negative version of a match - and both must be satisfied for the rule to match." + description: |- + A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy + and security Profiles reference rules - separated out as a list of rules for both + ingress and egress packet matching. + + Each positive match criteria has a negated version, prefixed with "Not". All the match + criteria within a rule must be satisfied for a packet to match. A single rule can contain + the positive and negative version of a match and both must be satisfied for the rule to match. properties: action: type: string destination: - description: Destination contains the match criteria that apply - to destination entity. + description: Destination contains the match criteria that apply to destination entity. properties: namespaceSelector: - description: "NamespaceSelector is an optional field that - contains a selector expression. Only traffic that originates - from (or terminates at) endpoints within the selected - namespaces will be matched. When both NamespaceSelector - and another selector are defined on the same rule, then - only workload endpoints that are matched by both selectors - will be selected by the rule. \n For NetworkPolicy, an - empty NamespaceSelector implies that the Selector is limited - to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, `global()` - NamespaceSelector implies that the Selector is limited - to selecting only GlobalNetworkSet or HostEndpoint. \n - For GlobalNetworkPolicy, an empty NamespaceSelector implies - the Selector applies to workload endpoints across all - namespaces." 
+ description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. type: string nets: - description: Nets is an optional field that restricts the - rule to only apply to traffic that originates from (or - terminates at) IP addresses in any of the given subnets. + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: - description: NotNets is the negated version of the Nets - field. + description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: - description: NotPorts is the negated version of the Ports - field. Since only some protocols have ports, if any ports - are specified it requires the Protocol match in the Rule - to be set to "TCP" or "UDP". + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -1911,18 +2597,18 @@ spec: x-kubernetes-int-or-string: true type: array notSelector: - description: NotSelector is the negated version of the Selector - field. See Selector field for subtleties with negated - selectors. + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. type: string ports: - description: "Ports is an optional field that restricts - the rule to only apply to traffic that has a source (destination) - port that matches one of these ranges/values. This value - is a list of integers or strings that represent ranges - of ports. \n Since only some protocols have ports, if - any ports are specified it requires the Protocol match - in the Rule to be set to \"TCP\" or \"UDP\"." + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -1931,91 +2617,73 @@ spec: x-kubernetes-int-or-string: true type: array selector: - description: "Selector is an optional field that contains - a selector expression (see Policy for sample syntax). - \ Only traffic that originates from (terminates at) endpoints - matching the selector will be matched. \n Note that: in - addition to the negated version of the Selector (see NotSelector - below), the selector expression syntax itself supports - negation. 
The two types of negation are subtly different. - One negates the set of matched endpoints, the other negates - the whole match: \n \tSelector = \"!has(my_label)\" matches - packets that are from other Calico-controlled \tendpoints - that do not have the label \"my_label\". \n \tNotSelector - = \"has(my_label)\" matches packets that are not from - Calico-controlled \tendpoints that do have the label \"my_label\". - \n The effect is that the latter will accept packets from - non-Calico sources whereas the former is limited to packets - from Calico-controlled endpoints." + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: - description: ServiceAccounts is an optional field that restricts - the rule to only apply to traffic that originates from - (or terminates at) a pod running as a matching service - account. + description: |- + ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. properties: names: - description: Names is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account whose name is in the list. + description: |- + Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates + at) a pod running as a service account whose name is in the list. items: type: string type: array selector: - description: Selector is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account that matches the given label selector. If - both Names and Selector are specified then they are - AND'ed. + description: |- + Selector is an optional field that restricts the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account that matches the given label selector. + If both Names and Selector are specified then they are AND'ed. type: string type: object services: - description: "Services is an optional field that contains - options for matching Kubernetes Services. If specified, - only traffic that originates from or terminates at endpoints - within the selected service(s) will be matched, and only - to/from each endpoint's port. \n Services cannot be specified - on the same rule as Selector, NotSelector, NamespaceSelector, - Nets, NotNets or ServiceAccounts. \n Ports and NotPorts - can only be specified with Services on ingress rules." 
+ description: |- + Services is an optional field that contains options for matching Kubernetes Services. + If specified, only traffic that originates from or terminates at endpoints within the selected + service(s) will be matched, and only to/from each endpoint's port. + + Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + NotNets or ServiceAccounts. + + Ports and NotPorts can only be specified with Services on ingress rules. properties: name: - description: Name specifies the name of a Kubernetes - Service to match. + description: Name specifies the name of a Kubernetes Service to match. type: string namespace: - description: Namespace specifies the namespace of the - given Service. If left empty, the rule will match - within this policy's namespace. + description: |- + Namespace specifies the namespace of the given Service. If left empty, the rule + will match within this policy's namespace. type: string type: object type: object http: - description: HTTP contains match criteria that apply to HTTP - requests. + description: HTTP contains match criteria that apply to HTTP requests. properties: methods: - description: Methods is an optional field that restricts - the rule to apply only to HTTP requests that use one of - the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple - methods are OR'd together. + description: |- + Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed + HTTP Methods (e.g. GET, PUT, etc.) + Multiple methods are OR'd together. items: type: string type: array paths: - description: 'Paths is an optional field that restricts - the rule to apply to HTTP requests that use one of the - listed HTTP Paths. Multiple paths are OR''d together. - e.g: - exact: /foo - prefix: /bar NOTE: Each entry may - ONLY specify either a `exact` or a `prefix` match. The - validator will check for it.' + description: |- + Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed + HTTP Paths. + Multiple paths are OR'd together. + e.g: + - exact: /foo + - prefix: /bar + NOTE: Each entry may ONLY specify either a `exact` or a `prefix` match. The validator will check for it. items: - description: 'HTTPPath specifies an HTTP path to match. - It may be either of the form: exact: : which matches - the path exactly or prefix: : which matches - the path prefix' + description: |- + HTTPPath specifies an HTTP path to match. It may be either of the form: + exact: : which matches the path exactly or + prefix: : which matches the path prefix properties: exact: type: string @@ -2025,110 +2693,108 @@ spec: type: array type: object icmp: - description: ICMP is an optional field that restricts the rule - to apply to a specific type and code of ICMP traffic. This - should only be specified if the Protocol field is set to "ICMP" - or "ICMPv6". + description: |- + ICMP is an optional field that restricts the rule to apply to a specific type and + code of ICMP traffic. This should only be specified if the Protocol field is set to + "ICMP" or "ICMPv6". properties: code: - description: Match on a specific ICMP code. If specified, - the Type value must also be specified. This is a technical - limitation imposed by the kernel's iptables firewall, - which Calico uses to enforce the rule. + description: |- + Match on a specific ICMP code. If specified, the Type value must also be specified. 
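# Sketch of the ICMP match fields above: protocol must be ICMP or ICMPv6 for
# type/code to apply, and code may only be given alongside type. Type 8 is
# ICMP Echo Request (ping); the rule fragment is illustrative.
ingress:
- action: Allow
  protocol: ICMP
  icmp:
    type: 8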
+ This is a technical limitation imposed by the kernel's iptables firewall, which + Calico uses to enforce the rule. type: integer type: - description: Match on a specific ICMP type. For example - a value of 8 refers to ICMP Echo Request (i.e. pings). + description: |- + Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + (i.e. pings). type: integer type: object ipVersion: - description: IPVersion is an optional field that restricts the - rule to only match a specific IP version. + description: |- + IPVersion is an optional field that restricts the rule to only match a specific IP + version. type: integer metadata: - description: Metadata contains additional information for this - rule + description: Metadata contains additional information for this rule properties: annotations: additionalProperties: type: string - description: Annotations is a set of key value pairs that - give extra information about the rule + description: Annotations is a set of key value pairs that give extra information about the rule type: object type: object notICMP: description: NotICMP is the negated version of the ICMP field. properties: code: - description: Match on a specific ICMP code. If specified, - the Type value must also be specified. This is a technical - limitation imposed by the kernel's iptables firewall, - which Calico uses to enforce the rule. + description: |- + Match on a specific ICMP code. If specified, the Type value must also be specified. + This is a technical limitation imposed by the kernel's iptables firewall, which + Calico uses to enforce the rule. type: integer type: - description: Match on a specific ICMP type. For example - a value of 8 refers to ICMP Echo Request (i.e. pings). + description: |- + Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + (i.e. pings). type: integer type: object notProtocol: anyOf: - type: integer - type: string - description: NotProtocol is the negated version of the Protocol - field. + description: NotProtocol is the negated version of the Protocol field. pattern: ^.* x-kubernetes-int-or-string: true protocol: anyOf: - type: integer - type: string - description: "Protocol is an optional field that restricts the - rule to only apply to traffic of a specific IP protocol. Required - if any of the EntityRules contain Ports (because ports only - apply to certain protocols). \n Must be one of these string - values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", - \"UDPLite\" or an integer in the range 1-255." + description: |- + Protocol is an optional field that restricts the rule to only apply to traffic of + a specific IP protocol. Required if any of the EntityRules contain Ports + (because ports only apply to certain protocols). + + Must be one of these string values: "TCP", "UDP", "ICMP", "ICMPv6", "SCTP", "UDPLite" + or an integer in the range 1-255. pattern: ^.* x-kubernetes-int-or-string: true source: - description: Source contains the match criteria that apply to - source entity. + description: Source contains the match criteria that apply to source entity. properties: namespaceSelector: - description: "NamespaceSelector is an optional field that - contains a selector expression. Only traffic that originates - from (or terminates at) endpoints within the selected - namespaces will be matched. When both NamespaceSelector - and another selector are defined on the same rule, then - only workload endpoints that are matched by both selectors - will be selected by the rule. 
\n For NetworkPolicy, an - empty NamespaceSelector implies that the Selector is limited - to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, `global()` - NamespaceSelector implies that the Selector is limited - to selecting only GlobalNetworkSet or HostEndpoint. \n - For GlobalNetworkPolicy, an empty NamespaceSelector implies - the Selector applies to workload endpoints across all - namespaces." + description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. type: string nets: - description: Nets is an optional field that restricts the - rule to only apply to traffic that originates from (or - terminates at) IP addresses in any of the given subnets. + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: - description: NotNets is the negated version of the Nets - field. + description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: - description: NotPorts is the negated version of the Ports - field. Since only some protocols have ports, if any ports - are specified it requires the Protocol match in the Rule - to be set to "TCP" or "UDP". + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -2137,18 +2803,18 @@ spec: x-kubernetes-int-or-string: true type: array notSelector: - description: NotSelector is the negated version of the Selector - field. See Selector field for subtleties with negated - selectors. + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. type: string ports: - description: "Ports is an optional field that restricts - the rule to only apply to traffic that has a source (destination) - port that matches one of these ranges/values. This value - is a list of integers or strings that represent ranges - of ports. \n Since only some protocols have ports, if - any ports are specified it requires the Protocol match - in the Rule to be set to \"TCP\" or \"UDP\"." + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". 
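# A minimal sketch (not part of the generated manifest) of how the fields described
# above combine in a rule: because ports are listed, Protocol must be pinned to "TCP"
# or "UDP", and the quoted selector shows endpoint-set negation (contrast with
# notSelector, which negates the whole match and so also admits non-Calico sources).
# The policy name, namespace and labels are made up; assumes the v3 API surface
# (calicoctl or the Calico API server) rather than the raw crd.projectcalico.org group:
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-db        # hypothetical
  namespace: production             # hypothetical
spec:
  selector: role == 'database'
  types:
    - Ingress
  ingress:
    - action: Allow
      protocol: TCP                 # required because ports are specified below
      source:
        selector: "!has(quarantined)"   # matches Calico endpoints lacking the label
      destination:
        ports:
          - 6379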
items: anyOf: - type: integer @@ -2157,64 +2823,45 @@ spec: x-kubernetes-int-or-string: true type: array selector: - description: "Selector is an optional field that contains - a selector expression (see Policy for sample syntax). - \ Only traffic that originates from (terminates at) endpoints - matching the selector will be matched. \n Note that: in - addition to the negated version of the Selector (see NotSelector - below), the selector expression syntax itself supports - negation. The two types of negation are subtly different. - One negates the set of matched endpoints, the other negates - the whole match: \n \tSelector = \"!has(my_label)\" matches - packets that are from other Calico-controlled \tendpoints - that do not have the label \"my_label\". \n \tNotSelector - = \"has(my_label)\" matches packets that are not from - Calico-controlled \tendpoints that do have the label \"my_label\". - \n The effect is that the latter will accept packets from - non-Calico sources whereas the former is limited to packets - from Calico-controlled endpoints." + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: - description: ServiceAccounts is an optional field that restricts - the rule to only apply to traffic that originates from - (or terminates at) a pod running as a matching service - account. + description: |- + ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. properties: names: - description: Names is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account whose name is in the list. + description: |- + Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates + at) a pod running as a service account whose name is in the list. items: type: string type: array selector: - description: Selector is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account that matches the given label selector. If - both Names and Selector are specified then they are - AND'ed. + description: |- + Selector is an optional field that restricts the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account that matches the given label selector. + If both Names and Selector are specified then they are AND'ed. 
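# Sketch of the ServiceAccounts match described above: Names and Selector are AND'ed,
# so this rule only admits traffic from pods running as a service account that is both
# named in the list and matched by the label selector. All names and labels are invented:
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: allow-from-api-reader       # hypothetical
  namespace: default
spec:
  selector: app == 'backend'
  ingress:
    - action: Allow
      source:
        serviceAccounts:
          names:
            - api-reader            # hypothetical service account name
          selector: team == 'platform'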
type: string type: object services: - description: "Services is an optional field that contains - options for matching Kubernetes Services. If specified, - only traffic that originates from or terminates at endpoints - within the selected service(s) will be matched, and only - to/from each endpoint's port. \n Services cannot be specified - on the same rule as Selector, NotSelector, NamespaceSelector, - Nets, NotNets or ServiceAccounts. \n Ports and NotPorts - can only be specified with Services on ingress rules." + description: |- + Services is an optional field that contains options for matching Kubernetes Services. + If specified, only traffic that originates from or terminates at endpoints within the selected + service(s) will be matched, and only to/from each endpoint's port. + + Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + NotNets or ServiceAccounts. + + Ports and NotPorts can only be specified with Services on ingress rules. properties: name: - description: Name specifies the name of a Kubernetes - Service to match. + description: Name specifies the name of a Kubernetes Service to match. type: string namespace: - description: Namespace specifies the namespace of the - given Service. If left empty, the rule will match - within this policy's namespace. + description: |- + Namespace specifies the namespace of the given Service. If left empty, the rule + will match within this policy's namespace. type: string type: object type: object @@ -2223,75 +2870,79 @@ spec: type: object type: array namespaceSelector: - description: NamespaceSelector is an optional field for an expression - used to select a pod based on namespaces. + description: NamespaceSelector is an optional field for an expression used to select a pod based on namespaces. type: string order: - description: Order is an optional field that specifies the order in - which the policy is applied. Policies with higher "order" are applied - after those with lower order. If the order is omitted, it may be - considered to be "infinite" - i.e. the policy will be applied last. Policies - with identical order will be applied in alphanumerical order based - on the Policy "Name". + description: |- + Order is an optional field that specifies the order in which the policy is applied. + Policies with higher "order" are applied after those with lower + order within the same tier. If the order is omitted, it may be considered to be "infinite" - i.e. the + policy will be applied last. Policies with identical order will be applied in + alphanumerical order based on the Policy "Name" within the tier. type: number + performanceHints: + description: |- + PerformanceHints contains a list of hints to Calico's policy engine to + help process the policy more efficiently. Hints never change the + enforcement behaviour of the policy. + + Currently, the only available hint is "AssumeNeededOnEveryNode". When + that hint is set on a policy, Felix will act as if the policy matches + a local endpoint even if it does not. This is useful for "preloading" + any large static policies that are known to be used on every node. + If the policy is _not_ used on a particular node then the work + done to preload the policy (and to maintain it) is wasted. + items: + type: string + type: array preDNAT: - description: PreDNAT indicates to apply the rules in this policy before - any DNAT. + description: PreDNAT indicates to apply the rules in this policy before any DNAT. 
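# Sketch tying together the Order and PerformanceHints fields described above: within
# a tier, lower order is enforced first, and AssumeNeededOnEveryNode only affects
# preloading, never enforcement. The name and order value are illustrative:
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: platform-baseline           # hypothetical
spec:
  order: 1000                       # applied after policies with lower order in the same tier
  selector: all()
  performanceHints:
    - AssumeNeededOnEveryNode       # hint only: preload on every node; wasted work where unused
  types:
    - Ingress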
type: boolean selector: - description: "The selector is an expression used to pick pick out - the endpoints that the policy should be applied to. \n Selector - expressions follow this syntax: \n \tlabel == \"string_literal\" - \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" - \ -> not equal; also matches if label is not present \tlabel in - { \"a\", \"b\", \"c\", ... } -> true if the value of label X is - one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", - ... } -> true if the value of label X is not one of \"a\", \"b\", - \"c\" \thas(label_name) -> True if that label is present \t! expr - -> negation of expr \texpr && expr -> Short-circuit and \texpr - || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() - or the empty selector -> matches all endpoints. \n Label names are - allowed to contain alphanumerics, -, _ and /. String literals are - more permissive but they do not support escape characters. \n Examples - (with made-up labels): \n \ttype == \"webserver\" && deployment - == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != - \"dev\" \t! has(label_name)" + description: "The selector is an expression used to pick out the endpoints that the policy should\nbe applied to.\n\nSelector expressions follow this syntax:\n\n\tlabel == \"string_literal\" -> comparison, e.g. my_label == \"foo bar\"\n\tlabel != \"string_literal\" -> not equal; also matches if label is not present\n\tlabel in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is one of \"a\", \"b\", \"c\"\n\tlabel not in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is not one of \"a\", \"b\", \"c\"\n\thas(label_name) -> True if that label is present\n\t! expr -> negation of expr\n\texpr && expr -> Short-circuit and\n\texpr || expr -> Short-circuit or\n\t( expr ) -> parens for grouping\n\tall() or the empty selector -> matches all endpoints.\n\nLabel names are allowed to contain alphanumerics, -, _ and /. String literals are more permissive\nbut they do not support escape characters.\n\nExamples (with made-up labels):\n\n\ttype == \"webserver\" && deployment == \"prod\"\n\ttype in {\"frontend\", \"backend\"}\n\tdeployment != \"dev\"\n\t! has(label_name)" type: string serviceAccountSelector: - description: ServiceAccountSelector is an optional field for an expression - used to select a pod based on service accounts. + description: ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts. + type: string + tier: + description: |- + The name of the tier that this policy belongs to. If this is omitted, the default + tier (name is "default") is assumed. The specified tier must exist in order to create + security policies within the tier; the "default" tier is created automatically if it + does not exist. This means that for deployments requiring only a single tier, the tier name + may be omitted on all policy management requests. type: string types: - description: "Types indicates whether this policy applies to ingress, - or to egress, or to both. When not explicitly specified (and so - the value on creation is empty or nil), Calico defaults Types according - to what Ingress and Egress rules are present in the policy.
The - default is: \n - [ PolicyTypeIngress ], if there are no Egress rules - (including the case where there are also no Ingress rules) \n - - [ PolicyTypeEgress ], if there are Egress rules but no Ingress - rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are - both Ingress and Egress rules. \n When the policy is read back again, - Types will always be one of these values, never empty or nil." + description: |- + Types indicates whether this policy applies to ingress, or to egress, or to both. When + not explicitly specified (and so the value on creation is empty or nil), Calico defaults + Types according to what Ingress and Egress rules are present in the policy. The + default is: + + - [ PolicyTypeIngress ], if there are no Egress rules (including the case where there are + also no Ingress rules) + + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress rules + + - [ PolicyTypeIngress, PolicyTypeEgress ], if there are both Ingress and Egress rules. + + When the policy is read back again, Types will always be one of these values, never empty + or nil. items: - description: PolicyType enumerates the possible values of the PolicySpec - Types field. + description: PolicyType enumerates the possible values of the PolicySpec Types field. type: string type: array type: object type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: globalnetworksets.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -2300,30 +2951,35 @@ spec: listKind: GlobalNetworkSetList plural: globalnetworksets singular: globalnetworkset + preserveUnknownFields: false scope: Cluster versions: - name: v1 schema: openAPIV3Schema: - description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs - that share labels to allow rules to refer to them via selectors. The labels - of GlobalNetworkSet are not namespaced. + description: |- + GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs that share labels to + allow rules to refer to them via selectors. The labels of GlobalNetworkSet are not namespaced. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: GlobalNetworkSetSpec contains the specification for a NetworkSet - resource. + description: GlobalNetworkSetSpec contains the specification for a NetworkSet resource. properties: nets: description: The list of IP networks that belong to this set. @@ -2334,17 +2990,13 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: hostendpoints.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -2353,6 +3005,7 @@ spec: listKind: HostEndpointList plural: hostendpoints singular: hostendpoint + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2360,58 +3013,52 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: HostEndpointSpec contains the specification for a HostEndpoint - resource. + description: HostEndpointSpec contains the specification for a HostEndpoint resource. properties: expectedIPs: - description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. - If \"InterfaceName\" is not present, Calico will look for an interface - matching any of the IPs in the list and apply policy to that. Note: - \tWhen using the selector match criteria in an ingress or egress - security Policy \tor Profile, Calico converts the selector into - a set of IP addresses. For host \tendpoints, the ExpectedIPs field - is used for that purpose. (If only the interface \tname is specified, - Calico does not learn the IPs of the interface for use in match - \tcriteria.)" + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint.\nIf \"InterfaceName\" is not present, Calico will look for an interface matching any\nof the IPs in the list and apply policy to that.\nNote:\n\tWhen using the selector match criteria in an ingress or egress security Policy\n\tor Profile, Calico converts the selector into a set of IP addresses. 
For host\n\tendpoints, the ExpectedIPs field is used for that purpose. (If only the interface\n\tname is specified, Calico does not learn the IPs of the interface for use in match\n\tcriteria.)" items: type: string type: array interfaceName: - description: "Either \"*\", or the name of a specific Linux interface - to apply policy to; or empty. \"*\" indicates that this HostEndpoint - governs all traffic to, from or through the default network namespace - of the host named by the \"Node\" field; entering and leaving that - namespace via any interface, including those from/to non-host-networked - local workloads. \n If InterfaceName is not \"*\", this HostEndpoint - only governs traffic that enters or leaves the host through the - specific interface named by InterfaceName, or - when InterfaceName - is empty - through the specific interface that has one of the IPs - in ExpectedIPs. Therefore, when InterfaceName is empty, at least - one expected IP must be specified. Only external interfaces (such - as \"eth0\") are supported here; it isn't possible for a HostEndpoint - to protect traffic through a specific local workload interface. - \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; - initially just pre-DNAT policy. Please check Calico documentation - for the latest position." + description: |- + Either "*", or the name of a specific Linux interface to apply policy to; or empty. "*" + indicates that this HostEndpoint governs all traffic to, from or through the default + network namespace of the host named by the "Node" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked local workloads. + + If InterfaceName is not "*", this HostEndpoint only governs traffic that enters or leaves + the host through the specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs in ExpectedIPs. + Therefore, when InterfaceName is empty, at least one expected IP must be specified. Only + external interfaces (such as "eth0") are supported here; it isn't possible for a + HostEndpoint to protect traffic through a specific local workload interface. + + Note: Only some kinds of policy are implemented for "*" HostEndpoints; initially just + pre-DNAT policy. Please check Calico documentation for the latest position. type: string node: description: The node name identifying the Calico node instance. type: string ports: - description: Ports contains the endpoint's named ports, which may - be referenced in security policy rules. + description: Ports contains the endpoint's named ports, which may be referenced in security policy rules. items: properties: name: @@ -2431,10 +3078,10 @@ spec: type: object type: array profiles: - description: A list of identifiers of security Profile objects that - apply to this endpoint. Each profile is applied in the order that - they appear in this list. Profile rules are applied after the selector-based - security policy. + description: |- + A list of identifiers of security Profile objects that apply to this endpoint. Each + profile is applied in the order that they appear in this list. Profile rules are applied + after the selector-based security policy. 
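# Sketch of a HostEndpoint per the schema above: policy is scoped to the named
# interface, and ExpectedIPs lets selector-based policy resolve to this endpoint's
# addresses. The node name, interface, IP, label and profile are all invented:
apiVersion: projectcalico.org/v3
kind: HostEndpoint
metadata:
  name: node1-eth0                  # hypothetical
  labels:
    environment: production
spec:
  node: node1                       # must match the Calico node name
  interfaceName: eth0
  expectedIPs:
    - 192.0.2.10                    # used when converting selectors to IP sets
  profiles:
    - base-allow                    # hypothetical profile, applied after policy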
items: type: string type: array @@ -2442,17 +3089,13 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: ipamblocks.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -2461,6 +3104,7 @@ spec: listKind: IPAMBlockList plural: ipamblocks singular: ipamblock + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2468,32 +3112,35 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: IPAMBlockSpec contains the specification for an IPAMBlock - resource. + description: IPAMBlockSpec contains the specification for an IPAMBlock resource. properties: affinity: - description: Affinity of the block, if this block has one. If set, - it will be of the form "host:". If not set, this block - is not affine to a host. + description: |- + Affinity of the block, if this block has one. If set, it will be of the form + "host:". If not set, this block is not affine to a host. type: string allocations: - description: Array of allocations in-use within this block. nil entries - mean the allocation is free. For non-nil entries at index i, the - index is the ordinal of the allocation within this block and the - value is the index of the associated attributes in the Attributes - array. + description: |- + Array of allocations in-use within this block. nil entries mean the allocation is free. + For non-nil entries at index i, the index is the ordinal of the allocation within this block + and the value is the index of the associated attributes in the Attributes array. items: type: integer # TODO: This nullable is manually added in. We should update controller-gen @@ -2501,9 +3148,9 @@ spec: nullable: true type: array attributes: - description: Attributes is an array of arbitrary metadata associated - with allocations in the block. 
To find attributes for a given allocation, - use the value of the allocation's entry in the Allocations array + description: |- + Attributes is an array of arbitrary metadata associated with allocations in the block. To find + attributes for a given allocation, use the value of the allocation's entry in the Allocations array as the index of the element in this array. items: properties: @@ -2519,35 +3166,34 @@ spec: description: The block's CIDR. type: string deleted: - description: Deleted is an internal boolean used to workaround a limitation - in the Kubernetes API whereby deletion will not return a conflict - error if the block has been updated. It should not be set manually. + description: |- + Deleted is an internal boolean used to work around a limitation in the Kubernetes API whereby + deletion will not return a conflict error if the block has been updated. It should not be set manually. type: boolean sequenceNumber: default: 0 - description: We store a sequence number that is updated each time - the block is written. Each allocation will also store the sequence - number of the block at the time of its creation. When releasing - an IP, passing the sequence number associated with the allocation - allows us to protect against a race condition and ensure the IP - hasn't been released and re-allocated since the release request. + description: |- + We store a sequence number that is updated each time the block is written. + Each allocation will also store the sequence number of the block at the time of its creation. + When releasing an IP, passing the sequence number associated with the allocation allows us + to protect against a race condition and ensure the IP hasn't been released and re-allocated + since the release request. format: int64 type: integer sequenceNumberForAllocation: additionalProperties: format: int64 type: integer - description: Map of allocated ordinal within the block to sequence - number of the block at the time of allocation. Kubernetes does not - allow numerical keys for maps, so the key is cast to a string. + description: |- + Map of allocated ordinal within the block to sequence number of the block at + the time of allocation. Kubernetes does not allow numerical keys for maps, so + the key is cast to a string. type: object strictAffinity: - description: StrictAffinity on the IPAMBlock is deprecated and no - longer used by the code. Use IPAMConfig StrictAffinity instead. + description: StrictAffinity on the IPAMBlock is deprecated and no longer used by the code. Use IPAMConfig StrictAffinity instead. type: boolean unallocated: - description: Unallocated is an ordered list of allocations which are - free in the block. + description: Unallocated is an ordered list of allocations which are free in the block.
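# Illustration (values invented) of the Allocations/Attributes indexing described
# above; IPAMBlock is an internal resource written by Calico IPAM, so this fragment
# only shows how the arrays relate: allocations[i] == j means ordinal i is in use and
# its metadata is attributes[j], while a null entry means ordinal i is free.
spec:
  cidr: 10.0.0.0/26                 # hypothetical block
  affinity: host:node1
  allocations: [0, null, 1]         # ordinals 0 and 2 allocated; ordinal 1 free
                                    # (a real /26 block carries 64 entries; truncated here)
  attributes:
    - handle_id: k8s-pod-network.aaa   # attributes[0] <- allocations[0]
    - handle_id: k8s-pod-network.bbb   # attributes[1] <- allocations[2]
  unallocated: [1]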
items: type: integer type: array @@ -2561,17 +3207,13 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: ipamconfigs.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -2580,6 +3222,7 @@ spec: listKind: IPAMConfigList plural: ipamconfigs singular: ipamconfig + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2587,26 +3230,33 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: IPAMConfigSpec contains the specification for an IPAMConfig - resource. + description: IPAMConfigSpec contains the specification for an IPAMConfig resource. properties: autoAllocateBlocks: type: boolean maxBlocksPerHost: - description: MaxBlocksPerHost, if non-zero, is the max number of blocks - that can be affine to each host. + description: |- + MaxBlocksPerHost, if non-zero, is the max number of blocks that can be + affine to each host. + maximum: 2147483647 + minimum: 0 type: integer strictAffinity: type: boolean @@ -2617,17 +3267,13 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: ipamhandles.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -2636,6 +3282,7 @@ spec: listKind: IPAMHandleList plural: ipamhandles singular: ipamhandle + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2643,20 +3290,24 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
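# Sketch of an IPAMConfig per the schema above. Calico reads a single cluster-wide
# resource, conventionally named "default" (an assumption here); values are illustrative:
apiVersion: crd.projectcalico.org/v1
kind: IPAMConfig
metadata:
  name: default
spec:
  strictAffinity: true              # only the affine host may allocate from a block
  autoAllocateBlocks: true
  maxBlocksPerHost: 4               # unset/0 means no per-host cap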
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: IPAMHandleSpec contains the specification for an IPAMHandle - resource. + description: IPAMHandleSpec contains the specification for an IPAMHandle resource. properties: block: additionalProperties: @@ -2673,17 +3324,13 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: ippools.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -2692,6 +3339,7 @@ spec: listKind: IPPoolList plural: ippools singular: ippool + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2699,14 +3347,19 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -2714,67 +3367,71 @@ spec: description: IPPoolSpec contains the specification for an IPPool resource. properties: allowedUses: - description: AllowedUse controls what the IP pool will be used for. 
If - not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility + description: |- + AllowedUses controls what the IP pool will be used for. If not specified or empty, defaults to + ["Tunnel", "Workload"] for back-compatibility items: type: string type: array + assignmentMode: + description: Determines how IP addresses should be assigned from this pool + enum: + - Automatic + - Manual + type: string blockSize: - description: The block size to use for IP address assignments from - this pool. Defaults to 26 for IPv4 and 122 for IPv6. + description: The block size to use for IP address assignments from this pool. Defaults to 26 for IPv4 and 122 for IPv6. type: integer cidr: description: The pool CIDR. type: string disableBGPExport: - description: 'Disable exporting routes from this IP Pool''s CIDR over - BGP. [Default: false]' + description: 'Disable exporting routes from this IP Pool''s CIDR over BGP. [Default: false]' type: boolean disabled: - description: When disabled is true, Calico IPAM will not assign addresses - from this pool. + description: When disabled is true, Calico IPAM will not assign addresses from this pool. type: boolean ipip: - description: 'Deprecated: this field is only used for APIv1 backwards - compatibility. Setting this field is not allowed, this field is - for internal use only.' + description: |- + Deprecated: this field is only used for APIv1 backwards compatibility. + Setting this field is not allowed, this field is for internal use only. properties: enabled: - description: When enabled is true, ipip tunneling will be used - to deliver packets to destinations within this pool. + description: |- + When enabled is true, ipip tunneling will be used to deliver packets to + destinations within this pool. type: boolean mode: - description: The IPIP mode. This can be one of "always" or "cross-subnet". A - mode of "always" will also use IPIP tunneling for routing to - destination IP addresses within this pool. A mode of "cross-subnet" - will only use IPIP tunneling when the destination node is on - a different subnet to the originating node. The default value - (if not specified) is "always". + description: |- + The IPIP mode. This can be one of "always" or "cross-subnet". A mode + of "always" will also use IPIP tunneling for routing to destination IP + addresses within this pool. A mode of "cross-subnet" will only use IPIP + tunneling when the destination node is on a different subnet to the + originating node. The default value (if not specified) is "always". type: string type: object ipipMode: - description: Contains configuration for IPIP tunneling for this pool. - If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling - is disabled). + description: |- + Contains configuration for IPIP tunneling for this pool. If not specified, + then this is defaulted to "Never" (i.e. IPIP tunneling is disabled). type: string nat-outgoing: - description: 'Deprecated: this field is only used for APIv1 backwards - compatibility. Setting this field is not allowed, this field is - for internal use only.' + description: |- + Deprecated: this field is only used for APIv1 backwards compatibility. + Setting this field is not allowed, this field is for internal use only. type: boolean natOutgoing: - description: When nat-outgoing is true, packets sent from Calico networked - containers in this pool to destinations outside of this pool will - be masqueraded.
+ description: |- + When natOutgoing is true, packets sent from Calico networked containers in + this pool to destinations outside of this pool will be masqueraded. type: boolean nodeSelector: - description: Allows IPPool to allocate for a specific node by label - selector. + description: Allows IPPool to allocate for a specific node by label selector. type: string vxlanMode: - description: Contains configuration for VXLAN tunneling for this pool. - If not specified, then this is defaulted to "Never" (i.e. VXLAN - tunneling is disabled). + description: |- + Contains configuration for VXLAN tunneling for this pool. If not specified, + then this is defaulted to "Never" (i.e. VXLAN tunneling is disabled). type: string required: - cidr @@ -2782,20 +3439,13 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: (devel) - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.17.3 name: ipreservations.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -2804,6 +3454,7 @@ spec: listKind: IPReservationList plural: ipreservations singular: ipreservation + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2811,24 +3462,27 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: IPReservationSpec contains the specification for an IPReservation - resource. + description: IPReservationSpec contains the specification for an IPReservation resource. properties: reservedCIDRs: - description: ReservedCIDRs is a list of CIDRs and/or IP addresses - that Calico IPAM will exclude from new allocations. + description: ReservedCIDRs is a list of CIDRs and/or IP addresses that Calico IPAM will exclude from new allocations. 
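# Sketch of an IPReservation using the ReservedCIDRs field just described; the name
# and addresses are examples only. Single IPs and whole CIDRs may both be listed:
apiVersion: projectcalico.org/v3
kind: IPReservation
metadata:
  name: reserve-infra-addresses     # hypothetical
spec:
  reservedCIDRs:
    - 10.0.2.55                     # reserve one address
    - 10.0.2.64/26                  # reserve an entire block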
items: type: string type: array @@ -2836,17 +3490,13 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: kubecontrollersconfigurations.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -2855,6 +3505,7 @@ spec: listKind: KubeControllersConfigurationList plural: kubecontrollersconfigurations singular: kubecontrollersconfiguration + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2862,225 +3513,249 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: KubeControllersConfigurationSpec contains the values of the - Kubernetes controllers configuration. + description: KubeControllersConfigurationSpec contains the values of the Kubernetes controllers configuration. properties: controllers: - description: Controllers enables and configures individual Kubernetes - controllers + description: Controllers enables and configures individual Kubernetes controllers properties: + loadBalancer: + description: LoadBalancer enables and configures the LoadBalancer controller. Enabled by default, set to nil to disable. + properties: + assignIPs: + type: string + type: object namespace: - description: Namespace enables and configures the namespace controller. - Enabled by default, set to nil to disable. + description: Namespace enables and configures the namespace controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: - description: 'ReconcilerPeriod is the period to perform reconciliation - with the Calico datastore. [Default: 5m]' + description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object node: - description: Node enables and configures the node controller. - Enabled by default, set to nil to disable. + description: Node enables and configures the node controller. Enabled by default, set to nil to disable. 
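# Sketch of a KubeControllersConfiguration exercising the controllers defined in this
# schema, including the hostEndpoint templates fields that follow; the resource is a
# singleton conventionally named "default", and the other values are illustrative:
apiVersion: projectcalico.org/v3
kind: KubeControllersConfiguration
metadata:
  name: default
spec:
  etcdV3CompactionPeriod: 10m
  controllers:
    node:
      reconcilerPeriod: 5m
      syncLabels: Enabled
      hostEndpoint:
        autoCreate: Enabled
        templates:                  # field definitions appear just below
          - generateName: infra     # hypothetical suffix for generated endpoints
            interfaceCIDRs:
              - 192.0.2.0/24
            nodeSelector: has(infra-node)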
properties: hostEndpoint: - description: HostEndpoint controls syncing nodes to host endpoints. - Disabled by default, set to nil to disable. + description: HostEndpoint controls syncing nodes to host endpoints. Disabled by default, set to nil to disable. properties: autoCreate: - description: 'AutoCreate enables automatic creation of - host endpoints for every node. [Default: Disabled]' + description: 'AutoCreate enables automatic creation of host endpoints for every node. [Default: Disabled]' + type: string + createDefaultHostEndpoint: type: string + templates: + description: Templates contains definitions for creating AutoHostEndpoints + items: + properties: + generateName: + description: GenerateName is appended to the end of the generated AutoHostEndpoint name + type: string + interfaceCIDRs: + description: InterfaceCIDRs contains a list of CIDRs used for matching nodeIPs to the AutoHostEndpoint + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: Labels adds the specified labels to the generated AutoHostEndpoint; labels from the node with the same name will be overwritten by values from the template + type: object + nodeSelector: + description: NodeSelector allows the AutoHostEndpoint to be created only for specific nodes + type: string + type: object + type: array type: object leakGracePeriod: - description: 'LeakGracePeriod is the period used by the controller - to determine if an IP address has been leaked. Set to 0 - to disable IP garbage collection. [Default: 15m]' + description: |- + LeakGracePeriod is the period used by the controller to determine if an IP address has been leaked. + Set to 0 to disable IP garbage collection. [Default: 15m] type: string reconcilerPeriod: - description: 'ReconcilerPeriod is the period to perform reconciliation - with the Calico datastore. [Default: 5m]' + description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string syncLabels: - description: 'SyncLabels controls whether to copy Kubernetes - node labels to Calico nodes. [Default: Enabled]' + description: 'SyncLabels controls whether to copy Kubernetes node labels to Calico nodes. [Default: Enabled]' type: string type: object policy: - description: Policy enables and configures the policy controller. - Enabled by default, set to nil to disable. + description: Policy enables and configures the policy controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: - description: 'ReconcilerPeriod is the period to perform reconciliation - with the Calico datastore. [Default: 5m]' + description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object serviceAccount: - description: ServiceAccount enables and configures the service - account controller. Enabled by default, set to nil to disable. + description: ServiceAccount enables and configures the service account controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: - description: 'ReconcilerPeriod is the period to perform reconciliation - with the Calico datastore. [Default: 5m]' + description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object workloadEndpoint: - description: WorkloadEndpoint enables and configures the workload - endpoint controller. Enabled by default, set to nil to disable.
+ description: WorkloadEndpoint enables and configures the workload endpoint controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: - description: 'ReconcilerPeriod is the period to perform reconciliation - with the Calico datastore. [Default: 5m]' + description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object type: object debugProfilePort: - description: DebugProfilePort configures the port to serve memory - and cpu profiles on. If not specified, profiling is disabled. + description: |- + DebugProfilePort configures the port to serve memory and cpu profiles on. If not specified, profiling + is disabled. format: int32 type: integer etcdV3CompactionPeriod: - description: 'EtcdV3CompactionPeriod is the period between etcdv3 - compaction requests. Set to 0 to disable. [Default: 10m]' + description: 'EtcdV3CompactionPeriod is the period between etcdv3 compaction requests. Set to 0 to disable. [Default: 10m]' type: string healthChecks: - description: 'HealthChecks enables or disables support for health - checks [Default: Enabled]' + description: 'HealthChecks enables or disables support for health checks [Default: Enabled]' type: string logSeverityScreen: - description: 'LogSeverityScreen is the log severity above which logs - are sent to the stdout. [Default: Info]' + description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' type: string prometheusMetricsPort: - description: 'PrometheusMetricsPort is the TCP port that the Prometheus - metrics server should bind to. Set to 0 to disable. [Default: 9094]' + description: 'PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. Set to 0 to disable. [Default: 9094]' type: integer required: - controllers type: object status: - description: KubeControllersConfigurationStatus represents the status - of the configuration. It's useful for admins to be able to see the actual - config that was applied, which can be modified by environment variables - on the kube-controllers process. + description: |- + KubeControllersConfigurationStatus represents the status of the configuration. It's useful for admins to + be able to see the actual config that was applied, which can be modified by environment variables on the + kube-controllers process. properties: environmentVars: additionalProperties: type: string - description: EnvironmentVars contains the environment variables on - the kube-controllers that influenced the RunningConfig. + description: |- + EnvironmentVars contains the environment variables on the kube-controllers that influenced + the RunningConfig. type: object runningConfig: - description: RunningConfig contains the effective config that is running - in the kube-controllers pod, after merging the API resource with - any environment variables. + description: |- + RunningConfig contains the effective config that is running in the kube-controllers pod, after + merging the API resource with any environment variables. properties: controllers: - description: Controllers enables and configures individual Kubernetes - controllers + description: Controllers enables and configures individual Kubernetes controllers properties: + loadBalancer: + description: LoadBalancer enables and configures the LoadBalancer controller. Enabled by default, set to nil to disable. 
+                     properties:
+                       assignIPs:
+                         type: string
+                     type: object
                    namespace:
-                     description: Namespace enables and configures the namespace
-                       controller. Enabled by default, set to nil to disable.
+                     description: Namespace enables and configures the namespace controller. Enabled by default, set to nil to disable.
                      properties:
                        reconcilerPeriod:
-                         description: 'ReconcilerPeriod is the period to perform
-                           reconciliation with the Calico datastore. [Default:
-                           5m]'
+                         description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]'
                          type: string
                      type: object
                    node:
-                     description: Node enables and configures the node controller.
-                       Enabled by default, set to nil to disable.
+                     description: Node enables and configures the node controller. Enabled by default, set to nil to disable.
                      properties:
                        hostEndpoint:
-                         description: HostEndpoint controls syncing nodes to host
-                           endpoints. Disabled by default, set to nil to disable.
+                         description: HostEndpoint controls syncing nodes to host endpoints. Disabled by default, set to nil to disable.
                          properties:
                            autoCreate:
-                             description: 'AutoCreate enables automatic creation
-                               of host endpoints for every node. [Default: Disabled]'
+                             description: 'AutoCreate enables automatic creation of host endpoints for every node. [Default: Disabled]'
+                             type: string
+                           createDefaultHostEndpoint:
                              type: string
+                           templates:
+                             description: Templates contains definitions for creating AutoHostEndpoints
+                             items:
+                               properties:
+                                 generateName:
+                                   description: GenerateName is appended to the end of the generated AutoHostEndpoint name
+                                   type: string
+                                 interfaceCIDRs:
+                                   description: InterfaceCIDRs contains a list of CIDRs used for matching nodeIPs to the AutoHostEndpoint
+                                   items:
+                                     type: string
+                                   type: array
+                                 labels:
+                                   additionalProperties:
+                                     type: string
+                                   description: Labels adds the specified labels to the generated AutoHostEndpoint; labels from the node with the same name will be overwritten by values from the template labels
+                                   type: object
+                                 nodeSelector:
+                                   description: NodeSelector allows the AutoHostEndpoint to be created only for specific nodes
+                                   type: string
+                               type: object
+                             type: array
                          type: object
                        leakGracePeriod:
-                         description: 'LeakGracePeriod is the period used by the
-                           controller to determine if an IP address has been leaked.
-                           Set to 0 to disable IP garbage collection. [Default:
-                           15m]'
+                         description: |-
+                           LeakGracePeriod is the period used by the controller to determine if an IP address has been leaked.
+                           Set to 0 to disable IP garbage collection. [Default: 15m]
                          type: string
                        reconcilerPeriod:
-                         description: 'ReconcilerPeriod is the period to perform
-                           reconciliation with the Calico datastore. [Default:
-                           5m]'
+                         description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]'
                          type: string
                        syncLabels:
-                         description: 'SyncLabels controls whether to copy Kubernetes
-                           node labels to Calico nodes. [Default: Enabled]'
+                         description: 'SyncLabels controls whether to copy Kubernetes node labels to Calico nodes. [Default: Enabled]'
                          type: string
                      type: object
                    policy:
-                     description: Policy enables and configures the policy controller.
-                       Enabled by default, set to nil to disable.
+                     description: Policy enables and configures the policy controller. Enabled by default, set to nil to disable.
                      properties:
                        reconcilerPeriod:
-                         description: 'ReconcilerPeriod is the period to perform
-                           reconciliation with the Calico datastore.
[Default: - 5m]' + description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object serviceAccount: - description: ServiceAccount enables and configures the service - account controller. Enabled by default, set to nil to disable. + description: ServiceAccount enables and configures the service account controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: - description: 'ReconcilerPeriod is the period to perform - reconciliation with the Calico datastore. [Default: - 5m]' + description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object workloadEndpoint: - description: WorkloadEndpoint enables and configures the workload - endpoint controller. Enabled by default, set to nil to disable. + description: WorkloadEndpoint enables and configures the workload endpoint controller. Enabled by default, set to nil to disable. properties: reconcilerPeriod: - description: 'ReconcilerPeriod is the period to perform - reconciliation with the Calico datastore. [Default: - 5m]' + description: 'ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m]' type: string type: object type: object debugProfilePort: - description: DebugProfilePort configures the port to serve memory - and cpu profiles on. If not specified, profiling is disabled. + description: |- + DebugProfilePort configures the port to serve memory and cpu profiles on. If not specified, profiling + is disabled. format: int32 type: integer etcdV3CompactionPeriod: - description: 'EtcdV3CompactionPeriod is the period between etcdv3 - compaction requests. Set to 0 to disable. [Default: 10m]' + description: 'EtcdV3CompactionPeriod is the period between etcdv3 compaction requests. Set to 0 to disable. [Default: 10m]' type: string healthChecks: - description: 'HealthChecks enables or disables support for health - checks [Default: Enabled]' + description: 'HealthChecks enables or disables support for health checks [Default: Enabled]' type: string logSeverityScreen: - description: 'LogSeverityScreen is the log severity above which - logs are sent to the stdout. [Default: Info]' + description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' type: string prometheusMetricsPort: - description: 'PrometheusMetricsPort is the TCP port that the Prometheus - metrics server should bind to. Set to 0 to disable. [Default: - 9094]' + description: 'PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. Set to 0 to disable. [Default: 9094]' type: integer required: - controllers @@ -3089,17 +3764,13 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: networkpolicies.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -3108,6 +3779,7 @@ spec: listKind: NetworkPolicyList plural: networkpolicies singular: networkpolicy + preserveUnknownFields: false scope: Namespaced versions: - name: v1 @@ -3115,73 +3787,76 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: properties: egress: - description: The ordered set of egress rules. Each rule contains - a set of packet match criteria and a corresponding action to apply. + description: |- + The ordered set of egress rules. Each rule contains a set of packet match criteria and + a corresponding action to apply. items: - description: "A Rule encapsulates a set of match criteria and an - action. Both selector-based security Policy and security Profiles - reference rules - separated out as a list of rules for both ingress - and egress packet matching. \n Each positive match criteria has - a negated version, prefixed with \"Not\". All the match criteria - within a rule must be satisfied for a packet to match. A single - rule can contain the positive and negative version of a match - and both must be satisfied for the rule to match." + description: |- + A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy + and security Profiles reference rules - separated out as a list of rules for both + ingress and egress packet matching. + + Each positive match criteria has a negated version, prefixed with "Not". All the match + criteria within a rule must be satisfied for a packet to match. A single rule can contain + the positive and negative version of a match and both must be satisfied for the rule to match. properties: action: type: string destination: - description: Destination contains the match criteria that apply - to destination entity. + description: Destination contains the match criteria that apply to destination entity. properties: namespaceSelector: - description: "NamespaceSelector is an optional field that - contains a selector expression. Only traffic that originates - from (or terminates at) endpoints within the selected - namespaces will be matched. When both NamespaceSelector - and another selector are defined on the same rule, then - only workload endpoints that are matched by both selectors - will be selected by the rule. \n For NetworkPolicy, an - empty NamespaceSelector implies that the Selector is limited - to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, `global()` - NamespaceSelector implies that the Selector is limited - to selecting only GlobalNetworkSet or HostEndpoint. 
\n - For GlobalNetworkPolicy, an empty NamespaceSelector implies - the Selector applies to workload endpoints across all - namespaces." + description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. type: string nets: - description: Nets is an optional field that restricts the - rule to only apply to traffic that originates from (or - terminates at) IP addresses in any of the given subnets. + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: - description: NotNets is the negated version of the Nets - field. + description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: - description: NotPorts is the negated version of the Ports - field. Since only some protocols have ports, if any ports - are specified it requires the Protocol match in the Rule - to be set to "TCP" or "UDP". + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -3190,18 +3865,18 @@ spec: x-kubernetes-int-or-string: true type: array notSelector: - description: NotSelector is the negated version of the Selector - field. See Selector field for subtleties with negated - selectors. + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. type: string ports: - description: "Ports is an optional field that restricts - the rule to only apply to traffic that has a source (destination) - port that matches one of these ranges/values. This value - is a list of integers or strings that represent ranges - of ports. \n Since only some protocols have ports, if - any ports are specified it requires the Protocol match - in the Rule to be set to \"TCP\" or \"UDP\"." + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -3210,91 +3885,73 @@ spec: x-kubernetes-int-or-string: true type: array selector: - description: "Selector is an optional field that contains - a selector expression (see Policy for sample syntax). - \ Only traffic that originates from (terminates at) endpoints - matching the selector will be matched. 
\n Note that: in - addition to the negated version of the Selector (see NotSelector - below), the selector expression syntax itself supports - negation. The two types of negation are subtly different. - One negates the set of matched endpoints, the other negates - the whole match: \n \tSelector = \"!has(my_label)\" matches - packets that are from other Calico-controlled \tendpoints - that do not have the label \"my_label\". \n \tNotSelector - = \"has(my_label)\" matches packets that are not from - Calico-controlled \tendpoints that do have the label \"my_label\". - \n The effect is that the latter will accept packets from - non-Calico sources whereas the former is limited to packets - from Calico-controlled endpoints." + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: - description: ServiceAccounts is an optional field that restricts - the rule to only apply to traffic that originates from - (or terminates at) a pod running as a matching service - account. + description: |- + ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. properties: names: - description: Names is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account whose name is in the list. + description: |- + Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates + at) a pod running as a service account whose name is in the list. items: type: string type: array selector: - description: Selector is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account that matches the given label selector. If - both Names and Selector are specified then they are - AND'ed. + description: |- + Selector is an optional field that restricts the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account that matches the given label selector. + If both Names and Selector are specified then they are AND'ed. type: string type: object services: - description: "Services is an optional field that contains - options for matching Kubernetes Services. If specified, - only traffic that originates from or terminates at endpoints - within the selected service(s) will be matched, and only - to/from each endpoint's port. \n Services cannot be specified - on the same rule as Selector, NotSelector, NamespaceSelector, - Nets, NotNets or ServiceAccounts. 
\n Ports and NotPorts - can only be specified with Services on ingress rules." + description: |- + Services is an optional field that contains options for matching Kubernetes Services. + If specified, only traffic that originates from or terminates at endpoints within the selected + service(s) will be matched, and only to/from each endpoint's port. + + Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + NotNets or ServiceAccounts. + + Ports and NotPorts can only be specified with Services on ingress rules. properties: name: - description: Name specifies the name of a Kubernetes - Service to match. + description: Name specifies the name of a Kubernetes Service to match. type: string namespace: - description: Namespace specifies the namespace of the - given Service. If left empty, the rule will match - within this policy's namespace. + description: |- + Namespace specifies the namespace of the given Service. If left empty, the rule + will match within this policy's namespace. type: string type: object type: object http: - description: HTTP contains match criteria that apply to HTTP - requests. + description: HTTP contains match criteria that apply to HTTP requests. properties: methods: - description: Methods is an optional field that restricts - the rule to apply only to HTTP requests that use one of - the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple - methods are OR'd together. + description: |- + Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed + HTTP Methods (e.g. GET, PUT, etc.) + Multiple methods are OR'd together. items: type: string type: array paths: - description: 'Paths is an optional field that restricts - the rule to apply to HTTP requests that use one of the - listed HTTP Paths. Multiple paths are OR''d together. - e.g: - exact: /foo - prefix: /bar NOTE: Each entry may - ONLY specify either a `exact` or a `prefix` match. The - validator will check for it.' + description: |- + Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed + HTTP Paths. + Multiple paths are OR'd together. + e.g: + - exact: /foo + - prefix: /bar + NOTE: Each entry may ONLY specify either a `exact` or a `prefix` match. The validator will check for it. items: - description: 'HTTPPath specifies an HTTP path to match. - It may be either of the form: exact: : which matches - the path exactly or prefix: : which matches - the path prefix' + description: |- + HTTPPath specifies an HTTP path to match. It may be either of the form: + exact: : which matches the path exactly or + prefix: : which matches the path prefix properties: exact: type: string @@ -3304,110 +3961,108 @@ spec: type: array type: object icmp: - description: ICMP is an optional field that restricts the rule - to apply to a specific type and code of ICMP traffic. This - should only be specified if the Protocol field is set to "ICMP" - or "ICMPv6". + description: |- + ICMP is an optional field that restricts the rule to apply to a specific type and + code of ICMP traffic. This should only be specified if the Protocol field is set to + "ICMP" or "ICMPv6". properties: code: - description: Match on a specific ICMP code. If specified, - the Type value must also be specified. This is a technical - limitation imposed by the kernel's iptables firewall, - which Calico uses to enforce the rule. + description: |- + Match on a specific ICMP code. If specified, the Type value must also be specified. 
+ This is a technical limitation imposed by the kernel's iptables firewall, which + Calico uses to enforce the rule. type: integer type: - description: Match on a specific ICMP type. For example - a value of 8 refers to ICMP Echo Request (i.e. pings). + description: |- + Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + (i.e. pings). type: integer type: object ipVersion: - description: IPVersion is an optional field that restricts the - rule to only match a specific IP version. + description: |- + IPVersion is an optional field that restricts the rule to only match a specific IP + version. type: integer metadata: - description: Metadata contains additional information for this - rule + description: Metadata contains additional information for this rule properties: annotations: additionalProperties: type: string - description: Annotations is a set of key value pairs that - give extra information about the rule + description: Annotations is a set of key value pairs that give extra information about the rule type: object type: object notICMP: description: NotICMP is the negated version of the ICMP field. properties: code: - description: Match on a specific ICMP code. If specified, - the Type value must also be specified. This is a technical - limitation imposed by the kernel's iptables firewall, - which Calico uses to enforce the rule. + description: |- + Match on a specific ICMP code. If specified, the Type value must also be specified. + This is a technical limitation imposed by the kernel's iptables firewall, which + Calico uses to enforce the rule. type: integer type: - description: Match on a specific ICMP type. For example - a value of 8 refers to ICMP Echo Request (i.e. pings). + description: |- + Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + (i.e. pings). type: integer type: object notProtocol: anyOf: - type: integer - type: string - description: NotProtocol is the negated version of the Protocol - field. + description: NotProtocol is the negated version of the Protocol field. pattern: ^.* x-kubernetes-int-or-string: true protocol: anyOf: - type: integer - type: string - description: "Protocol is an optional field that restricts the - rule to only apply to traffic of a specific IP protocol. Required - if any of the EntityRules contain Ports (because ports only - apply to certain protocols). \n Must be one of these string - values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", - \"UDPLite\" or an integer in the range 1-255." + description: |- + Protocol is an optional field that restricts the rule to only apply to traffic of + a specific IP protocol. Required if any of the EntityRules contain Ports + (because ports only apply to certain protocols). + + Must be one of these string values: "TCP", "UDP", "ICMP", "ICMPv6", "SCTP", "UDPLite" + or an integer in the range 1-255. pattern: ^.* x-kubernetes-int-or-string: true source: - description: Source contains the match criteria that apply to - source entity. + description: Source contains the match criteria that apply to source entity. properties: namespaceSelector: - description: "NamespaceSelector is an optional field that - contains a selector expression. Only traffic that originates - from (or terminates at) endpoints within the selected - namespaces will be matched. When both NamespaceSelector - and another selector are defined on the same rule, then - only workload endpoints that are matched by both selectors - will be selected by the rule. 
\n For NetworkPolicy, an - empty NamespaceSelector implies that the Selector is limited - to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, `global()` - NamespaceSelector implies that the Selector is limited - to selecting only GlobalNetworkSet or HostEndpoint. \n - For GlobalNetworkPolicy, an empty NamespaceSelector implies - the Selector applies to workload endpoints across all - namespaces." + description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. type: string nets: - description: Nets is an optional field that restricts the - rule to only apply to traffic that originates from (or - terminates at) IP addresses in any of the given subnets. + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: - description: NotNets is the negated version of the Nets - field. + description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: - description: NotPorts is the negated version of the Ports - field. Since only some protocols have ports, if any ports - are specified it requires the Protocol match in the Rule - to be set to "TCP" or "UDP". + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -3416,18 +4071,18 @@ spec: x-kubernetes-int-or-string: true type: array notSelector: - description: NotSelector is the negated version of the Selector - field. See Selector field for subtleties with negated - selectors. + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. type: string ports: - description: "Ports is an optional field that restricts - the rule to only apply to traffic that has a source (destination) - port that matches one of these ranges/values. This value - is a list of integers or strings that represent ranges - of ports. \n Since only some protocols have ports, if - any ports are specified it requires the Protocol match - in the Rule to be set to \"TCP\" or \"UDP\"." + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". 
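# Editor's note: as the ports description above says, Ports/NotPorts only make
# sense together with a "TCP" or "UDP" Protocol match (the ports schema
# continues below). A hypothetical rule sketch; the selector and port values
# are illustrative:
#
#   egress:
#     - action: Allow
#       protocol: TCP
#       destination:
#         selector: role == 'db'
#         ports:
#           - 5432
#           - "8080:8090"   # a string expresses a port range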
items: anyOf: - type: integer @@ -3436,64 +4091,45 @@ spec: x-kubernetes-int-or-string: true type: array selector: - description: "Selector is an optional field that contains - a selector expression (see Policy for sample syntax). - \ Only traffic that originates from (terminates at) endpoints - matching the selector will be matched. \n Note that: in - addition to the negated version of the Selector (see NotSelector - below), the selector expression syntax itself supports - negation. The two types of negation are subtly different. - One negates the set of matched endpoints, the other negates - the whole match: \n \tSelector = \"!has(my_label)\" matches - packets that are from other Calico-controlled \tendpoints - that do not have the label \"my_label\". \n \tNotSelector - = \"has(my_label)\" matches packets that are not from - Calico-controlled \tendpoints that do have the label \"my_label\". - \n The effect is that the latter will accept packets from - non-Calico sources whereas the former is limited to packets - from Calico-controlled endpoints." + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: - description: ServiceAccounts is an optional field that restricts - the rule to only apply to traffic that originates from - (or terminates at) a pod running as a matching service - account. + description: |- + ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. properties: names: - description: Names is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account whose name is in the list. + description: |- + Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates + at) a pod running as a service account whose name is in the list. items: type: string type: array selector: - description: Selector is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account that matches the given label selector. If - both Names and Selector are specified then they are - AND'ed. + description: |- + Selector is an optional field that restricts the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account that matches the given label selector. + If both Names and Selector are specified then they are AND'ed. 
type: string type: object services: - description: "Services is an optional field that contains - options for matching Kubernetes Services. If specified, - only traffic that originates from or terminates at endpoints - within the selected service(s) will be matched, and only - to/from each endpoint's port. \n Services cannot be specified - on the same rule as Selector, NotSelector, NamespaceSelector, - Nets, NotNets or ServiceAccounts. \n Ports and NotPorts - can only be specified with Services on ingress rules." + description: |- + Services is an optional field that contains options for matching Kubernetes Services. + If specified, only traffic that originates from or terminates at endpoints within the selected + service(s) will be matched, and only to/from each endpoint's port. + + Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + NotNets or ServiceAccounts. + + Ports and NotPorts can only be specified with Services on ingress rules. properties: name: - description: Name specifies the name of a Kubernetes - Service to match. + description: Name specifies the name of a Kubernetes Service to match. type: string namespace: - description: Namespace specifies the namespace of the - given Service. If left empty, the rule will match - within this policy's namespace. + description: |- + Namespace specifies the namespace of the given Service. If left empty, the rule + will match within this policy's namespace. type: string type: object type: object @@ -3502,59 +4138,57 @@ spec: type: object type: array ingress: - description: The ordered set of ingress rules. Each rule contains - a set of packet match criteria and a corresponding action to apply. + description: |- + The ordered set of ingress rules. Each rule contains a set of packet match criteria and + a corresponding action to apply. items: - description: "A Rule encapsulates a set of match criteria and an - action. Both selector-based security Policy and security Profiles - reference rules - separated out as a list of rules for both ingress - and egress packet matching. \n Each positive match criteria has - a negated version, prefixed with \"Not\". All the match criteria - within a rule must be satisfied for a packet to match. A single - rule can contain the positive and negative version of a match - and both must be satisfied for the rule to match." + description: |- + A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy + and security Profiles reference rules - separated out as a list of rules for both + ingress and egress packet matching. + + Each positive match criteria has a negated version, prefixed with "Not". All the match + criteria within a rule must be satisfied for a packet to match. A single rule can contain + the positive and negative version of a match and both must be satisfied for the rule to match. properties: action: type: string destination: - description: Destination contains the match criteria that apply - to destination entity. + description: Destination contains the match criteria that apply to destination entity. properties: namespaceSelector: - description: "NamespaceSelector is an optional field that - contains a selector expression. Only traffic that originates - from (or terminates at) endpoints within the selected - namespaces will be matched. When both NamespaceSelector - and another selector are defined on the same rule, then - only workload endpoints that are matched by both selectors - will be selected by the rule. 
\n For NetworkPolicy, an - empty NamespaceSelector implies that the Selector is limited - to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, `global()` - NamespaceSelector implies that the Selector is limited - to selecting only GlobalNetworkSet or HostEndpoint. \n - For GlobalNetworkPolicy, an empty NamespaceSelector implies - the Selector applies to workload endpoints across all - namespaces." + description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. type: string nets: - description: Nets is an optional field that restricts the - rule to only apply to traffic that originates from (or - terminates at) IP addresses in any of the given subnets. + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: - description: NotNets is the negated version of the Nets - field. + description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: - description: NotPorts is the negated version of the Ports - field. Since only some protocols have ports, if any ports - are specified it requires the Protocol match in the Rule - to be set to "TCP" or "UDP". + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -3563,18 +4197,18 @@ spec: x-kubernetes-int-or-string: true type: array notSelector: - description: NotSelector is the negated version of the Selector - field. See Selector field for subtleties with negated - selectors. + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. type: string ports: - description: "Ports is an optional field that restricts - the rule to only apply to traffic that has a source (destination) - port that matches one of these ranges/values. This value - is a list of integers or strings that represent ranges - of ports. \n Since only some protocols have ports, if - any ports are specified it requires the Protocol match - in the Rule to be set to \"TCP\" or \"UDP\"." + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". 
items: anyOf: - type: integer @@ -3583,91 +4217,73 @@ spec: x-kubernetes-int-or-string: true type: array selector: - description: "Selector is an optional field that contains - a selector expression (see Policy for sample syntax). - \ Only traffic that originates from (terminates at) endpoints - matching the selector will be matched. \n Note that: in - addition to the negated version of the Selector (see NotSelector - below), the selector expression syntax itself supports - negation. The two types of negation are subtly different. - One negates the set of matched endpoints, the other negates - the whole match: \n \tSelector = \"!has(my_label)\" matches - packets that are from other Calico-controlled \tendpoints - that do not have the label \"my_label\". \n \tNotSelector - = \"has(my_label)\" matches packets that are not from - Calico-controlled \tendpoints that do have the label \"my_label\". - \n The effect is that the latter will accept packets from - non-Calico sources whereas the former is limited to packets - from Calico-controlled endpoints." + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: - description: ServiceAccounts is an optional field that restricts - the rule to only apply to traffic that originates from - (or terminates at) a pod running as a matching service - account. + description: |- + ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. properties: names: - description: Names is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account whose name is in the list. + description: |- + Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates + at) a pod running as a service account whose name is in the list. items: type: string type: array selector: - description: Selector is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account that matches the given label selector. If - both Names and Selector are specified then they are - AND'ed. + description: |- + Selector is an optional field that restricts the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account that matches the given label selector. + If both Names and Selector are specified then they are AND'ed. 
type: string type: object services: - description: "Services is an optional field that contains - options for matching Kubernetes Services. If specified, - only traffic that originates from or terminates at endpoints - within the selected service(s) will be matched, and only - to/from each endpoint's port. \n Services cannot be specified - on the same rule as Selector, NotSelector, NamespaceSelector, - Nets, NotNets or ServiceAccounts. \n Ports and NotPorts - can only be specified with Services on ingress rules." + description: |- + Services is an optional field that contains options for matching Kubernetes Services. + If specified, only traffic that originates from or terminates at endpoints within the selected + service(s) will be matched, and only to/from each endpoint's port. + + Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + NotNets or ServiceAccounts. + + Ports and NotPorts can only be specified with Services on ingress rules. properties: name: - description: Name specifies the name of a Kubernetes - Service to match. + description: Name specifies the name of a Kubernetes Service to match. type: string namespace: - description: Namespace specifies the namespace of the - given Service. If left empty, the rule will match - within this policy's namespace. + description: |- + Namespace specifies the namespace of the given Service. If left empty, the rule + will match within this policy's namespace. type: string type: object type: object http: - description: HTTP contains match criteria that apply to HTTP - requests. + description: HTTP contains match criteria that apply to HTTP requests. properties: methods: - description: Methods is an optional field that restricts - the rule to apply only to HTTP requests that use one of - the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple - methods are OR'd together. + description: |- + Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed + HTTP Methods (e.g. GET, PUT, etc.) + Multiple methods are OR'd together. items: type: string type: array paths: - description: 'Paths is an optional field that restricts - the rule to apply to HTTP requests that use one of the - listed HTTP Paths. Multiple paths are OR''d together. - e.g: - exact: /foo - prefix: /bar NOTE: Each entry may - ONLY specify either a `exact` or a `prefix` match. The - validator will check for it.' + description: |- + Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed + HTTP Paths. + Multiple paths are OR'd together. + e.g: + - exact: /foo + - prefix: /bar + NOTE: Each entry may ONLY specify either a `exact` or a `prefix` match. The validator will check for it. items: - description: 'HTTPPath specifies an HTTP path to match. - It may be either of the form: exact: : which matches - the path exactly or prefix: : which matches - the path prefix' + description: |- + HTTPPath specifies an HTTP path to match. It may be either of the form: + exact: : which matches the path exactly or + prefix: : which matches the path prefix properties: exact: type: string @@ -3677,110 +4293,108 @@ spec: type: array type: object icmp: - description: ICMP is an optional field that restricts the rule - to apply to a specific type and code of ICMP traffic. This - should only be specified if the Protocol field is set to "ICMP" - or "ICMPv6". 
+ description: |- + ICMP is an optional field that restricts the rule to apply to a specific type and + code of ICMP traffic. This should only be specified if the Protocol field is set to + "ICMP" or "ICMPv6". properties: code: - description: Match on a specific ICMP code. If specified, - the Type value must also be specified. This is a technical - limitation imposed by the kernel's iptables firewall, - which Calico uses to enforce the rule. + description: |- + Match on a specific ICMP code. If specified, the Type value must also be specified. + This is a technical limitation imposed by the kernel's iptables firewall, which + Calico uses to enforce the rule. type: integer type: - description: Match on a specific ICMP type. For example - a value of 8 refers to ICMP Echo Request (i.e. pings). + description: |- + Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + (i.e. pings). type: integer type: object ipVersion: - description: IPVersion is an optional field that restricts the - rule to only match a specific IP version. + description: |- + IPVersion is an optional field that restricts the rule to only match a specific IP + version. type: integer metadata: - description: Metadata contains additional information for this - rule + description: Metadata contains additional information for this rule properties: annotations: additionalProperties: type: string - description: Annotations is a set of key value pairs that - give extra information about the rule + description: Annotations is a set of key value pairs that give extra information about the rule type: object type: object notICMP: description: NotICMP is the negated version of the ICMP field. properties: code: - description: Match on a specific ICMP code. If specified, - the Type value must also be specified. This is a technical - limitation imposed by the kernel's iptables firewall, - which Calico uses to enforce the rule. + description: |- + Match on a specific ICMP code. If specified, the Type value must also be specified. + This is a technical limitation imposed by the kernel's iptables firewall, which + Calico uses to enforce the rule. type: integer type: - description: Match on a specific ICMP type. For example - a value of 8 refers to ICMP Echo Request (i.e. pings). + description: |- + Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + (i.e. pings). type: integer type: object notProtocol: anyOf: - type: integer - type: string - description: NotProtocol is the negated version of the Protocol - field. + description: NotProtocol is the negated version of the Protocol field. pattern: ^.* x-kubernetes-int-or-string: true protocol: anyOf: - type: integer - type: string - description: "Protocol is an optional field that restricts the - rule to only apply to traffic of a specific IP protocol. Required - if any of the EntityRules contain Ports (because ports only - apply to certain protocols). \n Must be one of these string - values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", - \"UDPLite\" or an integer in the range 1-255." + description: |- + Protocol is an optional field that restricts the rule to only apply to traffic of + a specific IP protocol. Required if any of the EntityRules contain Ports + (because ports only apply to certain protocols). + + Must be one of these string values: "TCP", "UDP", "ICMP", "ICMPv6", "SCTP", "UDPLite" + or an integer in the range 1-255. 
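# Editor's note: a hypothetical ingress rule combining the protocol and icmp
# fields described above; the selector value is illustrative. ICMP type 8 is
# Echo Request (ping):
#
#   ingress:
#     - action: Allow
#       protocol: ICMP
#       icmp:
#         type: 8
#       source:
#         selector: role == 'monitoring'
#
# Protocol also accepts a raw IP protocol number in the range 1-255 (for
# example, protocol: 132 for SCTP).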
pattern: ^.* x-kubernetes-int-or-string: true source: - description: Source contains the match criteria that apply to - source entity. + description: Source contains the match criteria that apply to source entity. properties: namespaceSelector: - description: "NamespaceSelector is an optional field that - contains a selector expression. Only traffic that originates - from (or terminates at) endpoints within the selected - namespaces will be matched. When both NamespaceSelector - and another selector are defined on the same rule, then - only workload endpoints that are matched by both selectors - will be selected by the rule. \n For NetworkPolicy, an - empty NamespaceSelector implies that the Selector is limited - to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, `global()` - NamespaceSelector implies that the Selector is limited - to selecting only GlobalNetworkSet or HostEndpoint. \n - For GlobalNetworkPolicy, an empty NamespaceSelector implies - the Selector applies to workload endpoints across all - namespaces." + description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. type: string nets: - description: Nets is an optional field that restricts the - rule to only apply to traffic that originates from (or - terminates at) IP addresses in any of the given subnets. + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. items: type: string type: array notNets: - description: NotNets is the negated version of the Nets - field. + description: NotNets is the negated version of the Nets field. items: type: string type: array notPorts: - description: NotPorts is the negated version of the Ports - field. Since only some protocols have ports, if any ports - are specified it requires the Protocol match in the Rule - to be set to "TCP" or "UDP". + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -3789,18 +4403,18 @@ spec: x-kubernetes-int-or-string: true type: array notSelector: - description: NotSelector is the negated version of the Selector - field. See Selector field for subtleties with negated - selectors. + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. type: string ports: - description: "Ports is an optional field that restricts - the rule to only apply to traffic that has a source (destination) - port that matches one of these ranges/values. 
This value - is a list of integers or strings that represent ranges - of ports. \n Since only some protocols have ports, if - any ports are specified it requires the Protocol match - in the Rule to be set to \"TCP\" or \"UDP\"." + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". items: anyOf: - type: integer @@ -3809,64 +4423,45 @@ spec: x-kubernetes-int-or-string: true type: array selector: - description: "Selector is an optional field that contains - a selector expression (see Policy for sample syntax). - \ Only traffic that originates from (terminates at) endpoints - matching the selector will be matched. \n Note that: in - addition to the negated version of the Selector (see NotSelector - below), the selector expression syntax itself supports - negation. The two types of negation are subtly different. - One negates the set of matched endpoints, the other negates - the whole match: \n \tSelector = \"!has(my_label)\" matches - packets that are from other Calico-controlled \tendpoints - that do not have the label \"my_label\". \n \tNotSelector - = \"has(my_label)\" matches packets that are not from - Calico-controlled \tendpoints that do have the label \"my_label\". - \n The effect is that the latter will accept packets from - non-Calico sources whereas the former is limited to packets - from Calico-controlled endpoints." + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints." type: string serviceAccounts: - description: ServiceAccounts is an optional field that restricts - the rule to only apply to traffic that originates from - (or terminates at) a pod running as a matching service - account. + description: |- + ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. properties: names: - description: Names is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account whose name is in the list. + description: |- + Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates + at) a pod running as a service account whose name is in the list. 
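# Editor's note: a hypothetical serviceAccounts match. Per the descriptions in
# this block, Names and Selector are AND'ed when both are given; the schema
# for names continues below. The account name and label are illustrative:
#
#   source:
#     serviceAccounts:
#       names:
#         - frontend-sa
#       selector: team == 'payments'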
items: type: string type: array selector: - description: Selector is an optional field that restricts - the rule to only apply to traffic that originates - from (or terminates at) a pod running as a service - account that matches the given label selector. If - both Names and Selector are specified then they are - AND'ed. + description: |- + Selector is an optional field that restricts the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account that matches the given label selector. + If both Names and Selector are specified then they are AND'ed. type: string type: object services: - description: "Services is an optional field that contains - options for matching Kubernetes Services. If specified, - only traffic that originates from or terminates at endpoints - within the selected service(s) will be matched, and only - to/from each endpoint's port. \n Services cannot be specified - on the same rule as Selector, NotSelector, NamespaceSelector, - Nets, NotNets or ServiceAccounts. \n Ports and NotPorts - can only be specified with Services on ingress rules." + description: |- + Services is an optional field that contains options for matching Kubernetes Services. + If specified, only traffic that originates from or terminates at endpoints within the selected + service(s) will be matched, and only to/from each endpoint's port. + + Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + NotNets or ServiceAccounts. + + Ports and NotPorts can only be specified with Services on ingress rules. properties: name: - description: Name specifies the name of a Kubernetes - Service to match. + description: Name specifies the name of a Kubernetes Service to match. type: string namespace: - description: Namespace specifies the namespace of the - given Service. If left empty, the rule will match - within this policy's namespace. + description: |- + Namespace specifies the namespace of the given Service. If left empty, the rule + will match within this policy's namespace. type: string type: object type: object @@ -3875,67 +4470,73 @@ spec: type: object type: array order: - description: Order is an optional field that specifies the order in - which the policy is applied. Policies with higher "order" are applied - after those with lower order. If the order is omitted, it may be - considered to be "infinite" - i.e. the policy will be applied last. Policies - with identical order will be applied in alphanumerical order based - on the Policy "Name". + description: |- + Order is an optional field that specifies the order in which the policy is applied. + Policies with higher "order" are applied after those with lower + order within the same tier. If the order is omitted, it may be considered to be "infinite" - i.e. the + policy will be applied last. Policies with identical order will be applied in + alphanumerical order based on the Policy "Name" within the tier. type: number + performanceHints: + description: |- + PerformanceHints contains a list of hints to Calico's policy engine to + help process the policy more efficiently. Hints never change the + enforcement behaviour of the policy. + + Currently, the only available hint is "AssumeNeededOnEveryNode". When + that hint is set on a policy, Felix will act as if the policy matches + a local endpoint even if it does not. This is useful for "preloading" + any large static policies that are known to be used on every node. 
+           If the policy is _not_ used on a particular node then the work
+           done to preload the policy (and to maintain it) is wasted.
+         items:
+           type: string
+         type: array
       selector:
-        description: "The selector is an expression used to pick pick out
-          the endpoints that the policy should be applied to. \n Selector
-          expressions follow this syntax: \n \tlabel == \"string_literal\"
-          \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
-          \ -> not equal; also matches if label is not present \tlabel in
-          { \"a\", \"b\", \"c\", ... } -> true if the value of label X is
-          one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\",
-          ... } -> true if the value of label X is not one of \"a\", \"b\",
-          \"c\" \thas(label_name) -> True if that label is present \t! expr
-          -> negation of expr \texpr && expr -> Short-circuit and \texpr
-          || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall()
-          or the empty selector -> matches all endpoints. \n Label names are
-          allowed to contain alphanumerics, -, _ and /. String literals are
-          more permissive but they do not support escape characters. \n Examples
-          (with made-up labels): \n \ttype == \"webserver\" && deployment
-          == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
-          \"dev\" \t! has(label_name)"
+        description: "The selector is an expression used to pick out the endpoints that the policy should\nbe applied to.\n\nSelector expressions follow this syntax:\n\n\tlabel == \"string_literal\" -> comparison, e.g. my_label == \"foo bar\"\n\tlabel != \"string_literal\" -> not equal; also matches if label is not present\n\tlabel in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is one of \"a\", \"b\", \"c\"\n\tlabel not in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is not one of \"a\", \"b\", \"c\"\n\thas(label_name) -> True if that label is present\n\t! expr -> negation of expr\n\texpr && expr -> Short-circuit and\n\texpr || expr -> Short-circuit or\n\t( expr ) -> parens for grouping\n\tall() or the empty selector -> matches all endpoints.\n\nLabel names are allowed to contain alphanumerics, -, _ and /. String literals are more permissive\nbut they do not support escape characters.\n\nExamples (with made-up labels):\n\n\ttype == \"webserver\" && deployment == \"prod\"\n\ttype in {\"frontend\", \"backend\"}\n\tdeployment != \"dev\"\n\t! has(label_name)"
         type: string
       serviceAccountSelector:
-        description: ServiceAccountSelector is an optional field for an expression
-          used to select a pod based on service accounts.
+        description: ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts.
+        type: string
+      tier:
+        description: |-
+          The name of the tier that this policy belongs to. If this is omitted, the default
+          tier (name is "default") is assumed. The specified tier must exist in order to create
+          security policies within the tier. The "default" tier is created automatically if it
+          does not exist, so for deployments requiring only a single tier, the tier name
+          may be omitted on all policy management requests.
         type: string
       types:
-        description: "Types indicates whether this policy applies to ingress,
-          or to egress, or to both. When not explicitly specified (and so
-          the value on creation is empty or nil), Calico defaults Types according
-          to what Ingress and Egress are present in the policy.
The default - is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including - the case where there are also no Ingress rules) \n - [ PolicyTypeEgress - ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, - PolicyTypeEgress ], if there are both Ingress and Egress rules. - \n When the policy is read back again, Types will always be one - of these values, never empty or nil." + description: |- + Types indicates whether this policy applies to ingress, or to egress, or to both. When + not explicitly specified (and so the value on creation is empty or nil), Calico defaults + Types according to what Ingress and Egress are present in the policy. The + default is: + + - [ PolicyTypeIngress ], if there are no Egress rules (including the case where there are + also no Ingress rules) + + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress rules + + - [ PolicyTypeIngress, PolicyTypeEgress ], if there are both Ingress and Egress rules. + + When the policy is read back again, Types will always be one of these values, never empty + or nil. items: - description: PolicyType enumerates the possible values of the PolicySpec - Types field. + description: PolicyType enumerates the possible values of the PolicySpec Types field. type: string type: array type: object type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 name: networksets.crd.projectcalico.org spec: group: crd.projectcalico.org @@ -3944,6 +4545,7 @@ spec: listKind: NetworkSetList plural: networksets singular: networkset + preserveUnknownFields: false scope: Namespaced versions: - name: v1 @@ -3952,20 +4554,24 @@ spec: description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: NetworkSetSpec contains the specification for a NetworkSet - resource. + description: NetworkSetSpec contains the specification for a NetworkSet resource. 
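# Illustrative only (not part of the generated CRD): a minimal NetworkSet that
# the schema above would validate. The name, namespace and CIDRs are hypothetical.
#
#   apiVersion: crd.projectcalico.org/v1
#   kind: NetworkSet
#   metadata:
#     name: example-external-nets
#     namespace: default
#   spec:
#     nets:
#       - 198.51.100.0/24
#       - 203.0.113.0/24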
properties: nets: description: The list of IP networks that belong to this set. @@ -3976,279 +4582,4535 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - ---- ---- -# Source: calico/templates/calico-kube-controllers-rbac.yaml - -# Include a clusterrole for the kube-controllers component, -# and bind it to the calico-kube-controllers serviceaccount. -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-kube-controllers -rules: - # Nodes are watched to monitor for deletions. - - apiGroups: [""] - resources: - - nodes - verbs: - - watch - - list - - get - # Pods are watched to check for existence as part of IPAM controller. - - apiGroups: [""] - resources: - - pods - verbs: - - get - - list - - watch - # IPAM resources are manipulated in response to node and block updates, as well as periodic triggers. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ipreservations - verbs: - - list - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - verbs: - - get - - list - - create - - update - - delete - - watch - # Pools are watched to maintain a mapping of blocks to IP pools. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - verbs: - - list - - watch - # kube-controllers manages hostendpoints. - - apiGroups: ["crd.projectcalico.org"] - resources: - - hostendpoints - verbs: - - get - - list - - create - - update - - delete - # Needs access to update clusterinformations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - clusterinformations - verbs: - - get - - list - - create - - update - - watch - # KubeControllersConfiguration is where it gets its config - - apiGroups: ["crd.projectcalico.org"] - resources: - - kubecontrollersconfigurations - verbs: - # read its own config - - get - # create a default if none exists - - create - # update status - - update - # watch for changes - - watch --- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: - name: calico-kube-controllers -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-kube-controllers -subjects: -- kind: ServiceAccount - name: calico-kube-controllers - namespace: kube-system ---- - ---- -# Source: calico/templates/calico-node-rbac.yaml -# Include a clusterrole for the calico-node DaemonSet, + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: stagedglobalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: StagedGlobalNetworkPolicy + listKind: StagedGlobalNetworkPolicyList + plural: stagedglobalnetworkpolicies + singular: stagedglobalnetworkpolicy + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy on forward traffic. + type: boolean + doNotTrack: + description: |- + DoNotTrack indicates whether packets matched by the rules in this policy should go through + the data plane's connection tracking, such as Linux conntrack. If True, the rules in + this policy are applied before any data plane connection tracking, and packets allowed by + this policy are marked as not to be tracked. + type: boolean + egress: + description: |- + The ordered set of egress rules. Each rule contains a set of packet match criteria and + a corresponding action to apply. + items: + description: |- + A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy + and security Profiles reference rules - separated out as a list of rules for both + ingress and egress packet matching. + + Each positive match criteria has a negated version, prefixed with "Not". All the match + criteria within a rule must be satisfied for a packet to match. A single rule can contain + the positive and negative version of a match and both must be satisfied for the rule to match. + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply to destination entity. + properties: + namespaceSelector: + description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. + type: string + nets: + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets field. + items: + type: string + type: array + notPorts: + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. + type: string + ports: + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. 
+
+                        Since only some protocols have ports, if any ports are specified it requires the
+                        Protocol match in the Rule to be set to "TCP" or "UDP".
+                      items:
+                        anyOf:
+                        - type: integer
+                        - type: string
+                        pattern: ^.*
+                        x-kubernetes-int-or-string: true
+                      type: array
+                    selector:
+                      description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints."
+                      type: string
+                    serviceAccounts:
+                      description: |-
+                        ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or
+                        terminates at) a pod running as a matching service account.
+                      properties:
+                        names:
+                          description: |-
+                            Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates
+                            at) a pod running as a service account whose name is in the list.
+                          items:
+                            type: string
+                          type: array
+                        selector:
+                          description: |-
+                            Selector is an optional field that restricts the rule to only apply to traffic that originates from
+                            (or terminates at) a pod running as a service account that matches the given label selector.
+                            If both Names and Selector are specified then they are AND'ed.
+                          type: string
+                      type: object
+                    services:
+                      description: |-
+                        Services is an optional field that contains options for matching Kubernetes Services.
+                        If specified, only traffic that originates from or terminates at endpoints within the selected
+                        service(s) will be matched, and only to/from each endpoint's port.
+
+                        Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets,
+                        NotNets or ServiceAccounts.
+
+                        Ports and NotPorts can only be specified with Services on ingress rules.
+                      properties:
+                        name:
+                          description: Name specifies the name of a Kubernetes Service to match.
+                          type: string
+                        namespace:
+                          description: |-
+                            Namespace specifies the namespace of the given Service. If left empty, the rule
+                            will match within this policy's namespace.
+                          type: string
+                      type: object
+                  type: object
+                http:
+                  description: HTTP contains match criteria that apply to HTTP requests.
+                  properties:
+                    methods:
+                      description: |-
+                        Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed
+                        HTTP Methods (e.g. GET, PUT, etc.)
+                        Multiple methods are OR'd together.
+                      items:
+                        type: string
+                      type: array
+                    paths:
+                      description: |-
+                        Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed
+                        HTTP Paths.
+                        Multiple paths are OR'd together.
+                        e.g.:
+                        - exact: /foo
+                        - prefix: /bar
+                        NOTE: Each entry may ONLY specify either an `exact` or a `prefix` match. The validator will check for it.
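# Illustrative only: a hedged sketch of a rule using the HTTP match above. This
# assumes application-layer policy is enabled in the cluster (HTTP criteria are
# not enforced by the plain data plane); the methods and paths are hypothetical.
#
#   - action: Allow
#     http:
#       methods: ["GET"]
#       paths:
#         - exact: /healthz
#         - prefix: /api/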
+                      items:
+                        description: |-
+                          HTTPPath specifies an HTTP path to match. It may be either of the form:
+                          exact: <path>: which matches the path exactly or
+                          prefix: <path-prefix>: which matches the path prefix
+                        properties:
+                          exact:
+                            type: string
+                          prefix:
+                            type: string
+                        type: object
+                      type: array
+                  type: object
+                icmp:
+                  description: |-
+                    ICMP is an optional field that restricts the rule to apply to a specific type and
+                    code of ICMP traffic. This should only be specified if the Protocol field is set to
+                    "ICMP" or "ICMPv6".
+                  properties:
+                    code:
+                      description: |-
+                        Match on a specific ICMP code. If specified, the Type value must also be specified.
+                        This is a technical limitation imposed by the kernel's iptables firewall, which
+                        Calico uses to enforce the rule.
+                      type: integer
+                    type:
+                      description: |-
+                        Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request
+                        (i.e. pings).
+                      type: integer
+                  type: object
+                ipVersion:
+                  description: |-
+                    IPVersion is an optional field that restricts the rule to only match a specific IP
+                    version.
+                  type: integer
+                metadata:
+                  description: Metadata contains additional information for this rule
+                  properties:
+                    annotations:
+                      additionalProperties:
+                        type: string
+                      description: Annotations is a set of key value pairs that give extra information about the rule
+                      type: object
+                  type: object
+                notICMP:
+                  description: NotICMP is the negated version of the ICMP field.
+                  properties:
+                    code:
+                      description: |-
+                        Match on a specific ICMP code. If specified, the Type value must also be specified.
+                        This is a technical limitation imposed by the kernel's iptables firewall, which
+                        Calico uses to enforce the rule.
+                      type: integer
+                    type:
+                      description: |-
+                        Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request
+                        (i.e. pings).
+                      type: integer
+                  type: object
+                notProtocol:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  description: NotProtocol is the negated version of the Protocol field.
+                  pattern: ^.*
+                  x-kubernetes-int-or-string: true
+                protocol:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  description: |-
+                    Protocol is an optional field that restricts the rule to only apply to traffic of
+                    a specific IP protocol. Required if any of the EntityRules contain Ports
+                    (because ports only apply to certain protocols).
+
+                    Must be one of these string values: "TCP", "UDP", "ICMP", "ICMPv6", "SCTP", "UDPLite"
+                    or an integer in the range 1-255.
+                  pattern: ^.*
+                  x-kubernetes-int-or-string: true
+                source:
+                  description: Source contains the match criteria that apply to source entity.
+                  properties:
+                    namespaceSelector:
+                      description: |-
+                        NamespaceSelector is an optional field that contains a selector expression. Only traffic
+                        that originates from (or terminates at) endpoints within the selected namespaces will be
+                        matched. When both NamespaceSelector and another selector are defined on the same rule, then only
+                        workload endpoints that are matched by both selectors will be selected by the rule.
+
+                        For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting
+                        only workload endpoints in the same namespace as the NetworkPolicy.
+
+                        For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting
+                        only GlobalNetworkSet or HostEndpoint.
+
+                        For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload
+                        endpoints across all namespaces.
+ type: string + nets: + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets field. + items: + type: string + type: array + notPorts: + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. + type: string + ports: + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints." + type: string + serviceAccounts: + description: |- + ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. + properties: + names: + description: |- + Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates + at) a pod running as a service account whose name is in the list. + items: + type: string + type: array + selector: + description: |- + Selector is an optional field that restricts the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account that matches the given label selector. + If both Names and Selector are specified then they are AND'ed. + type: string + type: object + services: + description: |- + Services is an optional field that contains options for matching Kubernetes Services. + If specified, only traffic that originates from or terminates at endpoints within the selected + service(s) will be matched, and only to/from each endpoint's port. + + Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + NotNets or ServiceAccounts. 
+ + Ports and NotPorts can only be specified with Services on ingress rules. + properties: + name: + description: Name specifies the name of a Kubernetes Service to match. + type: string + namespace: + description: |- + Namespace specifies the namespace of the given Service. If left empty, the rule + will match within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: |- + The ordered set of ingress rules. Each rule contains a set of packet match criteria and + a corresponding action to apply. + items: + description: |- + A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy + and security Profiles reference rules - separated out as a list of rules for both + ingress and egress packet matching. + + Each positive match criteria has a negated version, prefixed with "Not". All the match + criteria within a rule must be satisfied for a packet to match. A single rule can contain + the positive and negative version of a match and both must be satisfied for the rule to match. + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply to destination entity. + properties: + namespaceSelector: + description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. + type: string + nets: + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets field. + items: + type: string + type: array + notPorts: + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. + type: string + ports: + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". 
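# Illustrative only: because Ports is set on this entity, the rule's Protocol
# must be "TCP" or "UDP". Integer entries match single ports and string entries
# such as "8080:8090" match inclusive ranges; all values here are hypothetical.
#
#   - action: Allow
#     protocol: TCP
#     destination:
#       ports: [80, 443, "8080:8090"]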
+                      items:
+                        anyOf:
+                        - type: integer
+                        - type: string
+                        pattern: ^.*
+                        x-kubernetes-int-or-string: true
+                      type: array
+                    selector:
+                      description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints."
+                      type: string
+                    serviceAccounts:
+                      description: |-
+                        ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or
+                        terminates at) a pod running as a matching service account.
+                      properties:
+                        names:
+                          description: |-
+                            Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates
+                            at) a pod running as a service account whose name is in the list.
+                          items:
+                            type: string
+                          type: array
+                        selector:
+                          description: |-
+                            Selector is an optional field that restricts the rule to only apply to traffic that originates from
+                            (or terminates at) a pod running as a service account that matches the given label selector.
+                            If both Names and Selector are specified then they are AND'ed.
+                          type: string
+                      type: object
+                    services:
+                      description: |-
+                        Services is an optional field that contains options for matching Kubernetes Services.
+                        If specified, only traffic that originates from or terminates at endpoints within the selected
+                        service(s) will be matched, and only to/from each endpoint's port.
+
+                        Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets,
+                        NotNets or ServiceAccounts.
+
+                        Ports and NotPorts can only be specified with Services on ingress rules.
+                      properties:
+                        name:
+                          description: Name specifies the name of a Kubernetes Service to match.
+                          type: string
+                        namespace:
+                          description: |-
+                            Namespace specifies the namespace of the given Service. If left empty, the rule
+                            will match within this policy's namespace.
+                          type: string
+                      type: object
+                  type: object
+                http:
+                  description: HTTP contains match criteria that apply to HTTP requests.
+                  properties:
+                    methods:
+                      description: |-
+                        Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed
+                        HTTP Methods (e.g. GET, PUT, etc.)
+                        Multiple methods are OR'd together.
+                      items:
+                        type: string
+                      type: array
+                    paths:
+                      description: |-
+                        Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed
+                        HTTP Paths.
+                        Multiple paths are OR'd together.
+                        e.g.:
+                        - exact: /foo
+                        - prefix: /bar
+                        NOTE: Each entry may ONLY specify either an `exact` or a `prefix` match. The validator will check for it.
+                      items:
+                        description: |-
+                          HTTPPath specifies an HTTP path to match.
It may be either of the form:
+                          exact: <path>: which matches the path exactly or
+                          prefix: <path-prefix>: which matches the path prefix
+                        properties:
+                          exact:
+                            type: string
+                          prefix:
+                            type: string
+                        type: object
+                      type: array
+                  type: object
+                icmp:
+                  description: |-
+                    ICMP is an optional field that restricts the rule to apply to a specific type and
+                    code of ICMP traffic. This should only be specified if the Protocol field is set to
+                    "ICMP" or "ICMPv6".
+                  properties:
+                    code:
+                      description: |-
+                        Match on a specific ICMP code. If specified, the Type value must also be specified.
+                        This is a technical limitation imposed by the kernel's iptables firewall, which
+                        Calico uses to enforce the rule.
+                      type: integer
+                    type:
+                      description: |-
+                        Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request
+                        (i.e. pings).
+                      type: integer
+                  type: object
+                ipVersion:
+                  description: |-
+                    IPVersion is an optional field that restricts the rule to only match a specific IP
+                    version.
+                  type: integer
+                metadata:
+                  description: Metadata contains additional information for this rule
+                  properties:
+                    annotations:
+                      additionalProperties:
+                        type: string
+                      description: Annotations is a set of key value pairs that give extra information about the rule
+                      type: object
+                  type: object
+                notICMP:
+                  description: NotICMP is the negated version of the ICMP field.
+                  properties:
+                    code:
+                      description: |-
+                        Match on a specific ICMP code. If specified, the Type value must also be specified.
+                        This is a technical limitation imposed by the kernel's iptables firewall, which
+                        Calico uses to enforce the rule.
+                      type: integer
+                    type:
+                      description: |-
+                        Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request
+                        (i.e. pings).
+                      type: integer
+                  type: object
+                notProtocol:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  description: NotProtocol is the negated version of the Protocol field.
+                  pattern: ^.*
+                  x-kubernetes-int-or-string: true
+                protocol:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  description: |-
+                    Protocol is an optional field that restricts the rule to only apply to traffic of
+                    a specific IP protocol. Required if any of the EntityRules contain Ports
+                    (because ports only apply to certain protocols).
+
+                    Must be one of these string values: "TCP", "UDP", "ICMP", "ICMPv6", "SCTP", "UDPLite"
+                    or an integer in the range 1-255.
+                  pattern: ^.*
+                  x-kubernetes-int-or-string: true
+                source:
+                  description: Source contains the match criteria that apply to source entity.
+                  properties:
+                    namespaceSelector:
+                      description: |-
+                        NamespaceSelector is an optional field that contains a selector expression. Only traffic
+                        that originates from (or terminates at) endpoints within the selected namespaces will be
+                        matched. When both NamespaceSelector and another selector are defined on the same rule, then only
+                        workload endpoints that are matched by both selectors will be selected by the rule.
+
+                        For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting
+                        only workload endpoints in the same namespace as the NetworkPolicy.
+
+                        For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting
+                        only GlobalNetworkSet or HostEndpoint.
+
+                        For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload
+                        endpoints across all namespaces.
+ type: string + nets: + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets field. + items: + type: string + type: array + notPorts: + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. + type: string + ports: + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints." + type: string + serviceAccounts: + description: |- + ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. + properties: + names: + description: |- + Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates + at) a pod running as a service account whose name is in the list. + items: + type: string + type: array + selector: + description: |- + Selector is an optional field that restricts the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account that matches the given label selector. + If both Names and Selector are specified then they are AND'ed. + type: string + type: object + services: + description: |- + Services is an optional field that contains options for matching Kubernetes Services. + If specified, only traffic that originates from or terminates at endpoints within the selected + service(s) will be matched, and only to/from each endpoint's port. + + Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + NotNets or ServiceAccounts. 
+
+                        Ports and NotPorts can only be specified with Services on ingress rules.
+                      properties:
+                        name:
+                          description: Name specifies the name of a Kubernetes Service to match.
+                          type: string
+                        namespace:
+                          description: |-
+                            Namespace specifies the namespace of the given Service. If left empty, the rule
+                            will match within this policy's namespace.
+                          type: string
+                      type: object
+                  type: object
+              required:
+              - action
+              type: object
+            type: array
+          namespaceSelector:
+            description: NamespaceSelector is an optional field for an expression used to select a pod based on namespaces.
+            type: string
+          order:
+            description: |-
+              Order is an optional field that specifies the order in which the policy is applied.
+              Policies with higher "order" are applied after those with lower
+              order within the same tier. If the order is omitted, it may be considered to be "infinite" - i.e. the
+              policy will be applied last. Policies with identical order will be applied in
+              alphanumerical order based on the Policy "Name" within the tier.
+            type: number
+          performanceHints:
+            description: |-
+              PerformanceHints contains a list of hints to Calico's policy engine to
+              help process the policy more efficiently. Hints never change the
+              enforcement behaviour of the policy.
+
+              Currently, the only available hint is "AssumeNeededOnEveryNode". When
+              that hint is set on a policy, Felix will act as if the policy matches
+              a local endpoint even if it does not. This is useful for "preloading"
+              any large static policies that are known to be used on every node.
+              If the policy is _not_ used on a particular node then the work
+              done to preload the policy (and to maintain it) is wasted.
+            items:
+              type: string
+            type: array
+          preDNAT:
+            description: PreDNAT indicates to apply the rules in this policy before any DNAT.
+            type: boolean
+          selector:
+            description: "The selector is an expression used to pick out the endpoints that the policy should\nbe applied to.\n\nSelector expressions follow this syntax:\n\n\tlabel == \"string_literal\" -> comparison, e.g. my_label == \"foo bar\"\n\tlabel != \"string_literal\" -> not equal; also matches if label is not present\n\tlabel in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is one of \"a\", \"b\", \"c\"\n\tlabel not in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is not one of \"a\", \"b\", \"c\"\n\thas(label_name) -> True if that label is present\n\t! expr -> negation of expr\n\texpr && expr -> Short-circuit and\n\texpr || expr -> Short-circuit or\n\t( expr ) -> parens for grouping\n\tall() or the empty selector -> matches all endpoints.\n\nLabel names are allowed to contain alphanumerics, -, _ and /. String literals are more permissive\nbut they do not support escape characters.\n\nExamples (with made-up labels):\n\n\ttype == \"webserver\" && deployment == \"prod\"\n\ttype in {\"frontend\", \"backend\"}\n\tdeployment != \"dev\"\n\t! has(label_name)"
+            type: string
+          serviceAccountSelector:
+            description: ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts.
+            type: string
+          stagedAction:
+            description: The staged action. If this is omitted, the default is Set.
+            type: string
+          tier:
+            description: |-
+              The name of the tier that this policy belongs to. If this is omitted, the default
+              tier (name is "default") is assumed.
The specified tier must exist in order to create
+              security policies within the tier. The "default" tier is created automatically if it
+              does not exist, so for deployments requiring only a single tier, the tier name
+              may be omitted on all policy management requests.
+            type: string
+          types:
+            description: |-
+              Types indicates whether this policy applies to ingress, or to egress, or to both. When
+              not explicitly specified (and so the value on creation is empty or nil), Calico defaults
+              Types according to what Ingress and Egress rules are present in the policy. The
+              default is:
+
+              - [ PolicyTypeIngress ], if there are no Egress rules (including the case where there are
+              also no Ingress rules)
+
+              - [ PolicyTypeEgress ], if there are Egress rules but no Ingress rules
+
+              - [ PolicyTypeIngress, PolicyTypeEgress ], if there are both Ingress and Egress rules.
+
+              When the policy is read back again, Types will always be one of these values, never empty
+              or nil.
+            items:
+              description: PolicyType enumerates the possible values of the PolicySpec Types field.
+              type: string
+            type: array
+        type: object
+    type: object
+    served: true
+    storage: true
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.17.3
+  name: stagedkubernetesnetworkpolicies.crd.projectcalico.org
+spec:
+  group: crd.projectcalico.org
+  names:
+    kind: StagedKubernetesNetworkPolicy
+    listKind: StagedKubernetesNetworkPolicyList
+    plural: stagedkubernetesnetworkpolicies
+    singular: stagedkubernetesnetworkpolicy
+  preserveUnknownFields: false
+  scope: Namespaced
+  versions:
+  - name: v1
+    schema:
+      openAPIV3Schema:
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            properties:
+              egress:
+                description: |-
+                  List of egress rules to be applied to the selected pods. Outgoing traffic is
+                  allowed if there are no NetworkPolicies selecting the pod (and cluster policy
+                  otherwise allows the traffic), OR if the traffic matches at least one egress rule
+                  across all of the NetworkPolicy objects whose podSelector matches the pod. If
+                  this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
+                  solely to ensure that the pods it selects are isolated by default).
+                  This field is beta-level in 1.8
+                items:
+                  description: |-
+                    NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods
+                    matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
+                    This type is beta-level in 1.8
+                  properties:
+                    ports:
+                      description: |-
+                        ports is a list of destination ports for outgoing traffic.
+                        Each item in this list is combined using a logical OR. If this field is
+                        empty or missing, this rule matches all ports (traffic not restricted by port).
+                        If this field is present and contains at least one item, then this rule allows
+                        traffic only if the traffic matches at least one port in the list.
+                      items:
+                        description: NetworkPolicyPort describes a port to allow traffic on
+                        properties:
+                          endPort:
+                            description: |-
+                              endPort indicates that the range of ports from port to endPort if set, inclusive,
+                              should be allowed by the policy. This field cannot be defined if the port field
+                              is not defined or if the port field is defined as a named (string) port.
+                              The endPort must be equal to or greater than port.
+                            format: int32
+                            type: integer
+                          port:
+                            anyOf:
+                            - type: integer
+                            - type: string
+                            description: |-
+                              port represents the port on the given protocol. This can either be a numerical or named
+                              port on a pod. If this field is not provided, this matches all port names and
+                              numbers.
+                              If present, only traffic on the specified protocol AND port will be matched.
+                            x-kubernetes-int-or-string: true
+                          protocol:
+                            description: |-
+                              protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match.
+                              If not specified, this field defaults to TCP.
+                            type: string
+                        type: object
+                      type: array
+                      x-kubernetes-list-type: atomic
+                    to:
+                      description: |-
+                        to is a list of destinations for outgoing traffic of pods selected for this rule.
+                        Items in this list are combined using a logical OR operation. If this field is
+                        empty or missing, this rule matches all destinations (traffic not restricted by
+                        destination). If this field is present and contains at least one item, this rule
+                        allows traffic only if the traffic matches at least one item in the to list.
+                      items:
+                        description: |-
+                          NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of
+                          fields are allowed
+                        properties:
+                          ipBlock:
+                            description: |-
+                              ipBlock defines policy on a particular IPBlock. If this field is set then
+                              neither of the other fields can be.
+                            properties:
+                              cidr:
+                                description: |-
+                                  cidr is a string representing the IPBlock
+                                  Valid examples are "192.168.1.0/24" or "2001:db8::/64"
+                                type: string
+                              except:
+                                description: |-
+                                  except is a slice of CIDRs that should not be included within an IPBlock
+                                  Valid examples are "192.168.1.0/24" or "2001:db8::/64"
+                                  Except values will be rejected if they are outside the cidr range
+                                items:
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: atomic
+                            required:
+                            - cidr
+                            type: object
+                          namespaceSelector:
+                            description: |-
+                              namespaceSelector selects namespaces using cluster-scoped labels. This field follows
+                              standard label selector semantics; if present but empty, it selects all namespaces.
+
+                              If podSelector is also set, then the NetworkPolicyPeer as a whole selects
+                              the pods matching podSelector in the namespaces selected by namespaceSelector.
+                              Otherwise it selects all pods in the namespaces selected by namespaceSelector.
+                            properties:
+                              matchExpressions:
+                                description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                                items:
+                                  description: |-
+                                    A label selector requirement is a selector that contains values, a key, and an operator that
+                                    relates the key and values.
+                                  properties:
+                                    key:
+                                      description: key is the label key that the selector applies to.
+                                      type: string
+                                    operator:
+                                      description: |-
+                                        operator represents a key's relationship to a set of values.
+                                        Valid operators are In, NotIn, Exists and DoesNotExist.
+                                      type: string
+                                    values:
+                                      description: |-
+                                        values is an array of string values.
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + podSelector is a label selector which selects pods. This field follows standard label + selector semantics; if present but empty, it selects all pods. + + If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + the pods matching podSelector in the Namespaces selected by NamespaceSelector. + Otherwise it selects the pods matching podSelector in the policy's own namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: array + ingress: + description: |- + List of ingress rules to be applied to the selected pods. Traffic is allowed to + a pod if there are no NetworkPolicies selecting the pod + (and cluster policy otherwise allows the traffic), OR if the traffic source is + the pod's local node, OR if the traffic matches at least one ingress rule + across all of the NetworkPolicy objects whose podSelector matches the pod. If + this field is empty then this NetworkPolicy does not allow any traffic (and serves + solely to ensure that the pods it selects are isolated by default) + items: + description: |- + NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods + matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. 
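# Illustrative only: a hedged sketch of an ingress rule of the shape described
# above, combining "from" peers with "ports". The CIDRs and port number are
# hypothetical.
#
#   ingress:
#     - from:
#         - ipBlock:
#             cidr: 172.17.0.0/16
#             except: ["172.17.1.0/24"]
#       ports:
#         - protocol: TCP
#           port: 6379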
+ properties: + from: + description: |- + from is a list of sources which should be able to access the pods selected for this rule. + Items in this list are combined using a logical OR operation. If this field is + empty or missing, this rule matches all sources (traffic not restricted by + source). If this field is present and contains at least one item, this rule + allows traffic only if the traffic matches at least one item in the from list. + items: + description: |- + NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of + fields are allowed + properties: + ipBlock: + description: |- + ipBlock defines policy on a particular IPBlock. If this field is set then + neither of the other fields can be. + properties: + cidr: + description: |- + cidr is a string representing the IPBlock + Valid examples are "192.168.1.0/24" or "2001:db8::/64" + type: string + except: + description: |- + except is a slice of CIDRs that should not be included within an IPBlock + Valid examples are "192.168.1.0/24" or "2001:db8::/64" + Except values will be rejected if they are outside the cidr range + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - cidr + type: object + namespaceSelector: + description: |- + namespaceSelector selects namespaces using cluster-scoped labels. This field follows + standard label selector semantics; if present but empty, it selects all namespaces. + + If podSelector is also set, then the NetworkPolicyPeer as a whole selects + the pods matching podSelector in the namespaces selected by namespaceSelector. + Otherwise it selects all pods in the namespaces selected by namespaceSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + podSelector is a label selector which selects pods. This field follows standard label + selector semantics; if present but empty, it selects all pods. + + If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + the pods matching podSelector in the Namespaces selected by NamespaceSelector. + Otherwise it selects the pods matching podSelector in the policy's own namespace. 
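# Illustrative only: when namespaceSelector and podSelector are set on the same
# peer, the peer selects pods matching podSelector within the namespaces matched
# by namespaceSelector. The labels below are hypothetical.
#
#   - namespaceSelector:
#       matchLabels:
#         team: payments
#     podSelector:
#       matchLabels:
#         app: frontend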
+                            properties:
+                              matchExpressions:
+                                description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                                items:
+                                  description: |-
+                                    A label selector requirement is a selector that contains values, a key, and an operator that
+                                    relates the key and values.
+                                  properties:
+                                    key:
+                                      description: key is the label key that the selector applies to.
+                                      type: string
+                                    operator:
+                                      description: |-
+                                        operator represents a key's relationship to a set of values.
+                                        Valid operators are In, NotIn, Exists and DoesNotExist.
+                                      type: string
+                                    values:
+                                      description: |-
+                                        values is an array of string values. If the operator is In or NotIn,
+                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                        the values array must be empty. This array is replaced during a strategic
+                                        merge patch.
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                  required:
+                                  - key
+                                  - operator
+                                  type: object
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              matchLabels:
+                                additionalProperties:
+                                  type: string
+                                description: |-
+                                  matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                  map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                  operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                type: object
+                            type: object
+                            x-kubernetes-map-type: atomic
+                        type: object
+                      type: array
+                      x-kubernetes-list-type: atomic
+                    ports:
+                      description: |-
+                        ports is a list of ports which should be made accessible on the pods selected for
+                        this rule. Each item in this list is combined using a logical OR. If this field is
+                        empty or missing, this rule matches all ports (traffic not restricted by port).
+                        If this field is present and contains at least one item, then this rule allows
+                        traffic only if the traffic matches at least one port in the list.
+                      items:
+                        description: NetworkPolicyPort describes a port to allow traffic on
+                        properties:
+                          endPort:
+                            description: |-
+                              endPort indicates that the range of ports from port to endPort if set, inclusive,
+                              should be allowed by the policy. This field cannot be defined if the port field
+                              is not defined or if the port field is defined as a named (string) port.
+                              The endPort must be equal to or greater than port.
+                            format: int32
+                            type: integer
+                          port:
+                            anyOf:
+                            - type: integer
+                            - type: string
+                            description: |-
+                              port represents the port on the given protocol. This can either be a numerical or named
+                              port on a pod. If this field is not provided, this matches all port names and
+                              numbers.
+                              If present, only traffic on the specified protocol AND port will be matched.
+                            x-kubernetes-int-or-string: true
+                          protocol:
+                            description: |-
+                              protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match.
+                              If not specified, this field defaults to TCP.
+                            type: string
+                        type: object
+                      type: array
+                      x-kubernetes-list-type: atomic
+                type: object
+              type: array
+            podSelector:
+              description: |-
+                Selects the pods to which this NetworkPolicy object applies. The array of
+                ingress rules is applied to any pods selected by this field. Multiple network
+                policies can select the same set of pods. In this case, the ingress rules for
+                each are combined additively. This field is NOT optional and follows standard
+                label selector semantics. An empty podSelector matches all pods in this
+                namespace.
+              properties:
+                matchExpressions:
+                  description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + policyTypes: + description: |- + List of rule types that the NetworkPolicy relates to. + Valid options are Ingress, Egress, or Ingress,Egress. + If this field is not specified, it will default based on the existence of Ingress or Egress rules; + policies that contain an Egress section are assumed to affect Egress, and all policies + (whether or not they contain an Ingress section) are assumed to affect Ingress. + If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. + Likewise, if you want to write a policy that specifies that no egress is allowed, + you must specify a policyTypes value that include "Egress" (since such a policy would not include + an Egress section and would otherwise default to just [ "Ingress" ]). + This field is beta-level in 1.8 + items: + description: |- + PolicyType string describes the NetworkPolicy type + This type is beta-level in 1.8 + type: string + type: array + stagedAction: + description: The staged action. If this is omitted, the default is Set. + type: string + type: object + type: object + served: true + storage: true +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: stagednetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: StagedNetworkPolicy + listKind: StagedNetworkPolicyList + plural: stagednetworkpolicies + singular: stagednetworkpolicy + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + egress: + description: |- + The ordered set of egress rules. Each rule contains a set of packet match criteria and + a corresponding action to apply. + items: + description: |- + A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy + and security Profiles reference rules - separated out as a list of rules for both + ingress and egress packet matching. + + Each positive match criteria has a negated version, prefixed with "Not". All the match + criteria within a rule must be satisfied for a packet to match. A single rule can contain + the positive and negative version of a match and both must be satisfied for the rule to match. + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply to destination entity. + properties: + namespaceSelector: + description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. + type: string + nets: + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets field. + items: + type: string + type: array + notPorts: + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. + type: string + ports: + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). 
Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints."
+ type: string
+ serviceAccounts:
+ description: |-
+ ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or
+ terminates at) a pod running as a matching service account.
+ properties:
+ names:
+ description: |-
+ Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates
+ at) a pod running as a service account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: |-
+ Selector is an optional field that restricts the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a service account that matches the given label selector.
+ If both Names and Selector are specified then they are AND'ed.
+ type: string
+ type: object
+ services:
+ description: |-
+ Services is an optional field that contains options for matching Kubernetes Services.
+ If specified, only traffic that originates from or terminates at endpoints within the selected
+ service(s) will be matched, and only to/from each endpoint's port.
+
+ Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets,
+ NotNets or ServiceAccounts.
+
+ Ports and NotPorts can only be specified with Services on ingress rules.
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes Service to match.
+ type: string
+ namespace:
+ description: |-
+ Namespace specifies the namespace of the given Service. If left empty, the rule
+ will match within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ http:
+ description: HTTP contains match criteria that apply to HTTP requests.
+ properties:
+ methods:
+ description: |-
+ Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed
+ HTTP Methods (e.g. GET, PUT, etc.)
+ Multiple methods are OR'd together.
+ items:
+ type: string
+ type: array
+ paths:
+ description: |-
+ Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed
+ HTTP Paths.
+ Multiple paths are OR'd together.
+ e.g.:
+ - exact: /foo
+ - prefix: /bar
+ NOTE: Each entry may ONLY specify either an `exact` or a `prefix` match. The validator will check for it.
+ items:
+ description: |-
+ HTTPPath specifies an HTTP path to match. It may be either of the form:
+ exact: <path>: which matches the path exactly or
+ prefix: <path-prefix>: which matches the path prefix
+ properties:
+ exact:
+ type: string
+ prefix:
+ type: string
+ type: object
+ type: array
+ type: object
+ icmp:
+ description: |-
+ ICMP is an optional field that restricts the rule to apply to a specific type and
+ code of ICMP traffic.
This should only be specified if the Protocol field is set to + "ICMP" or "ICMPv6". + properties: + code: + description: |- + Match on a specific ICMP code. If specified, the Type value must also be specified. + This is a technical limitation imposed by the kernel's iptables firewall, which + Calico uses to enforce the rule. + type: integer + type: + description: |- + Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + (i.e. pings). + type: integer + type: object + ipVersion: + description: |- + IPVersion is an optional field that restricts the rule to only match a specific IP + version. + type: integer + metadata: + description: Metadata contains additional information for this rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: |- + Match on a specific ICMP code. If specified, the Type value must also be specified. + This is a technical limitation imposed by the kernel's iptables firewall, which + Calico uses to enforce the rule. + type: integer + type: + description: |- + Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: |- + Protocol is an optional field that restricts the rule to only apply to traffic of + a specific IP protocol. Required if any of the EntityRules contain Ports + (because ports only apply to certain protocols). + + Must be one of these string values: "TCP", "UDP", "ICMP", "ICMPv6", "SCTP", "UDPLite" + or an integer in the range 1-255. + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to source entity. + properties: + namespaceSelector: + description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. + type: string + nets: + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets field. + items: + type: string + type: array + notPorts: + description: |- + NotPorts is the negated version of the Ports field. 
+ Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. + type: string + ports: + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints." + type: string + serviceAccounts: + description: |- + ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. + properties: + names: + description: |- + Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates + at) a pod running as a service account whose name is in the list. + items: + type: string + type: array + selector: + description: |- + Selector is an optional field that restricts the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account that matches the given label selector. + If both Names and Selector are specified then they are AND'ed. + type: string + type: object + services: + description: |- + Services is an optional field that contains options for matching Kubernetes Services. + If specified, only traffic that originates from or terminates at endpoints within the selected + service(s) will be matched, and only to/from each endpoint's port. + + Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + NotNets or ServiceAccounts. + + Ports and NotPorts can only be specified with Services on ingress rules. + properties: + name: + description: Name specifies the name of a Kubernetes Service to match. + type: string + namespace: + description: |- + Namespace specifies the namespace of the given Service. If left empty, the rule + will match within this policy's namespace. 
+ type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: |- + The ordered set of ingress rules. Each rule contains a set of packet match criteria and + a corresponding action to apply. + items: + description: |- + A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy + and security Profiles reference rules - separated out as a list of rules for both + ingress and egress packet matching. + + Each positive match criteria has a negated version, prefixed with "Not". All the match + criteria within a rule must be satisfied for a packet to match. A single rule can contain + the positive and negative version of a match and both must be satisfied for the rule to match. + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply to destination entity. + properties: + namespaceSelector: + description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. + type: string + nets: + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets field. + items: + type: string + type: array + notPorts: + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. + type: string + ports: + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. 
The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints."
+ type: string
+ serviceAccounts:
+ description: |-
+ ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or
+ terminates at) a pod running as a matching service account.
+ properties:
+ names:
+ description: |-
+ Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates
+ at) a pod running as a service account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: |-
+ Selector is an optional field that restricts the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a service account that matches the given label selector.
+ If both Names and Selector are specified then they are AND'ed.
+ type: string
+ type: object
+ services:
+ description: |-
+ Services is an optional field that contains options for matching Kubernetes Services.
+ If specified, only traffic that originates from or terminates at endpoints within the selected
+ service(s) will be matched, and only to/from each endpoint's port.
+
+ Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets,
+ NotNets or ServiceAccounts.
+
+ Ports and NotPorts can only be specified with Services on ingress rules.
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes Service to match.
+ type: string
+ namespace:
+ description: |-
+ Namespace specifies the namespace of the given Service. If left empty, the rule
+ will match within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ http:
+ description: HTTP contains match criteria that apply to HTTP requests.
+ properties:
+ methods:
+ description: |-
+ Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed
+ HTTP Methods (e.g. GET, PUT, etc.)
+ Multiple methods are OR'd together.
+ items:
+ type: string
+ type: array
+ paths:
+ description: |-
+ Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed
+ HTTP Paths.
+ Multiple paths are OR'd together.
+ e.g.:
+ - exact: /foo
+ - prefix: /bar
+ NOTE: Each entry may ONLY specify either an `exact` or a `prefix` match. The validator will check for it.
+ items:
+ description: |-
+ HTTPPath specifies an HTTP path to match. It may be either of the form:
+ exact: <path>: which matches the path exactly or
+ prefix: <path-prefix>: which matches the path prefix
+ properties:
+ exact:
+ type: string
+ prefix:
+ type: string
+ type: object
+ type: array
+ type: object
+ icmp:
+ description: |-
+ ICMP is an optional field that restricts the rule to apply to a specific type and
+ code of ICMP traffic. This should only be specified if the Protocol field is set to
+ "ICMP" or "ICMPv6".
+ properties:
+ code:
+ description: |-
+ Match on a specific ICMP code. If specified, the Type value must also be specified.
+ This is a technical limitation imposed by the kernel's iptables firewall, which + Calico uses to enforce the rule. + type: integer + type: + description: |- + Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + (i.e. pings). + type: integer + type: object + ipVersion: + description: |- + IPVersion is an optional field that restricts the rule to only match a specific IP + version. + type: integer + metadata: + description: Metadata contains additional information for this rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: |- + Match on a specific ICMP code. If specified, the Type value must also be specified. + This is a technical limitation imposed by the kernel's iptables firewall, which + Calico uses to enforce the rule. + type: integer + type: + description: |- + Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: |- + Protocol is an optional field that restricts the rule to only apply to traffic of + a specific IP protocol. Required if any of the EntityRules contain Ports + (because ports only apply to certain protocols). + + Must be one of these string values: "TCP", "UDP", "ICMP", "ICMPv6", "SCTP", "UDPLite" + or an integer in the range 1-255. + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to source entity. + properties: + namespaceSelector: + description: |- + NamespaceSelector is an optional field that contains a selector expression. Only traffic + that originates from (or terminates at) endpoints within the selected namespaces will be + matched. When both NamespaceSelector and another selector are defined on the same rule, then only + workload endpoints that are matched by both selectors will be selected by the rule. + + For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + only workload endpoints in the same namespace as the NetworkPolicy. + + For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + only GlobalNetworkSet or HostEndpoint. + + For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces. + type: string + nets: + description: |- + Nets is an optional field that restricts the rule to only apply to traffic that + originates from (or terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets field. + items: + type: string + type: array + notPorts: + description: |- + NotPorts is the negated version of the Ports field. + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: |- + NotSelector is the negated version of the Selector field. See Selector field for + subtleties with negated selectors. + type: string + ports: + description: |- + Ports is an optional field that restricts the rule to only apply to traffic that has a + source (destination) port that matches one of these ranges/values. This value is a + list of integers or strings that represent ranges of ports. + + Since only some protocols have ports, if any ports are specified it requires the + Protocol match in the Rule to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains a selector expression (see Policy for\nsample syntax). Only traffic that originates from (terminates at) endpoints matching\nthe selector will be matched.\n\nNote that: in addition to the negated version of the Selector (see NotSelector below), the\nselector expression syntax itself supports negation. The two types of negation are subtly\ndifferent. One negates the set of matched endpoints, the other negates the whole match:\n\n\tSelector = \"!has(my_label)\" matches packets that are from other Calico-controlled\n\tendpoints that do not have the label \"my_label\".\n\n\tNotSelector = \"has(my_label)\" matches packets that are not from Calico-controlled\n\tendpoints that do have the label \"my_label\".\n\nThe effect is that the latter will accept packets from non-Calico sources whereas the\nformer is limited to packets from Calico-controlled endpoints." + type: string + serviceAccounts: + description: |- + ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. + properties: + names: + description: |- + Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates + at) a pod running as a service account whose name is in the list. + items: + type: string + type: array + selector: + description: |- + Selector is an optional field that restricts the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account that matches the given label selector. + If both Names and Selector are specified then they are AND'ed. + type: string + type: object + services: + description: |- + Services is an optional field that contains options for matching Kubernetes Services. + If specified, only traffic that originates from or terminates at endpoints within the selected + service(s) will be matched, and only to/from each endpoint's port. + + Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + NotNets or ServiceAccounts. + + Ports and NotPorts can only be specified with Services on ingress rules. + properties: + name: + description: Name specifies the name of a Kubernetes Service to match. + type: string + namespace: + description: |- + Namespace specifies the namespace of the given Service. If left empty, the rule + will match within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: |- + Order is an optional field that specifies the order in which the policy is applied. 
+ Policies with higher "order" are applied after those with lower
+ order within the same tier. If the order is omitted, it may be considered to be "infinite" - i.e. the
+ policy will be applied last. Policies with identical order will be applied in
+ alphanumerical order based on the Policy "Name" within the tier.
+ type: number
+ performanceHints:
+ description: |-
+ PerformanceHints contains a list of hints to Calico's policy engine to
+ help process the policy more efficiently. Hints never change the
+ enforcement behaviour of the policy.
+
+ Currently, the only available hint is "AssumeNeededOnEveryNode". When
+ that hint is set on a policy, Felix will act as if the policy matches
+ a local endpoint even if it does not. This is useful for "preloading"
+ any large static policies that are known to be used on every node.
+ If the policy is _not_ used on a particular node then the work
+ done to preload the policy (and to maintain it) is wasted.
+ items:
+ type: string
+ type: array
+ selector:
+ description: "The selector is an expression used to pick out the endpoints that the policy should\nbe applied to.\n\nSelector expressions follow this syntax:\n\n\tlabel == \"string_literal\" -> comparison, e.g. my_label == \"foo bar\"\n\tlabel != \"string_literal\" -> not equal; also matches if label is not present\n\tlabel in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is one of \"a\", \"b\", \"c\"\n\tlabel not in { \"a\", \"b\", \"c\", ... } -> true if the value of label X is not one of \"a\", \"b\", \"c\"\n\thas(label_name) -> True if that label is present\n\t! expr -> negation of expr\n\texpr && expr -> Short-circuit and\n\texpr || expr -> Short-circuit or\n\t( expr ) -> parens for grouping\n\tall() or the empty selector -> matches all endpoints.\n\nLabel names are allowed to contain alphanumerics, -, _ and /. String literals are more permissive\nbut they do not support escape characters.\n\nExamples (with made-up labels):\n\n\ttype == \"webserver\" && deployment == \"prod\"\n\ttype in {\"frontend\", \"backend\"}\n\tdeployment != \"dev\"\n\t! has(label_name)"
+ type: string
+ serviceAccountSelector:
+ description: ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts.
+ type: string
+ stagedAction:
+ description: The staged action. If this is omitted, the default is Set.
+ type: string
+ tier:
+ description: |-
+ The name of the tier that this policy belongs to. If this is omitted, the default
+ tier (name is "default") is assumed. The specified tier must exist in order to create
+ security policies within the tier; the "default" tier is created automatically if it
+ does not exist. This means that for deployments requiring only a single Tier, the tier name
+ may be omitted on all policy management requests.
+ type: string
+ types:
+ description: |-
+ Types indicates whether this policy applies to ingress, or to egress, or to both. When
+ not explicitly specified (and so the value on creation is empty or nil), Calico defaults
+ Types according to what Ingress and Egress are present in the policy. The
+ default is:
+
+ - [ PolicyTypeIngress ], if there are no Egress rules (including the case where there are
+ also no Ingress rules)
+
+ - [ PolicyTypeEgress ], if there are Egress rules but no Ingress rules
+
+ - [ PolicyTypeIngress, PolicyTypeEgress ], if there are both Ingress and Egress rules.
+
+ When the policy is read back again, Types will always be one of these values, never empty
+ or nil.
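To show how the spec fields above fit together, here is a hedged sketch of a StagedNetworkPolicy; the name, labels, tier, and order are hypothetical, not values shipped by this chart:

apiVersion: crd.projectcalico.org/v1
kind: StagedNetworkPolicy
metadata:
  name: staged-allow-web
  namespace: default
spec:
  stagedAction: Set        # the default when omitted
  tier: default            # the "default" tier is created automatically
  order: 100               # lower order applies first within the tier
  selector: app == "web"
  types:
  - Ingress
  ingress:
  - action: Allow
    protocol: TCP
    source:
      selector: role == "frontend"
    destination:
      ports:
      - 8080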
+ items:
+ description: PolicyType enumerates the possible values of the PolicySpec Types field.
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.17.3
+ name: tiers.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: Tier
+ listKind: TierList
+ plural: tiers
+ singular: tier
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: TierSpec contains the specification for a security policy tier resource.
+ properties:
+ defaultAction:
+ description: |-
+ DefaultAction specifies the action applied to workloads selected by a policy in the tier
+ whose traffic did not match any rule.
+ [Default: Deny]
+ enum:
+ - Pass
+ - Deny
+ type: string
+ order:
+ description: |-
+ Order is an optional field that specifies the order in which the tier is applied.
+ Tiers with higher "order" are applied after those with lower order. If the order
+ is omitted, it may be considered to be "infinite" - i.e. the tier will be applied
+ last. Tiers with identical order will be applied in alphanumerical order based
+ on the Tier "Name".
+ type: number
+ type: object
+ type: object
+ served: true
+ storage: true
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ api-approved.kubernetes.io: https://github.com/kubernetes-sigs/network-policy-api/pull/30
+ policy.networking.k8s.io/bundle-version: v0.1.1
+ policy.networking.k8s.io/channel: experimental
+ creationTimestamp: null
+ name: adminnetworkpolicies.policy.networking.k8s.io
+spec:
+ group: policy.networking.k8s.io
+ names:
+ kind: AdminNetworkPolicy
+ listKind: AdminNetworkPolicyList
+ plural: adminnetworkpolicies
+ shortNames:
+ - anp
+ singular: adminnetworkpolicy
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.priority
+ name: Priority
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: |-
+ AdminNetworkPolicy is a cluster level resource that is part of the
+ AdminNetworkPolicy API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Specification of the desired behavior of AdminNetworkPolicy. + properties: + egress: + description: |- + Egress is the list of Egress rules to be applied to the selected pods. + A total of 100 rules will be allowed in each ANP instance. + The relative precedence of egress rules within a single ANP object (all of + which share the priority) will be determined by the order in which the rule + is written. Thus, a rule that appears at the top of the egress rules + would take the highest precedence. + ANPs with no egress rules do not affect egress traffic. + + + Support: Core + items: + description: |- + AdminNetworkPolicyEgressRule describes an action to take on a particular + set of traffic originating from pods selected by a AdminNetworkPolicy's + Subject field. + + properties: + action: + description: |- + Action specifies the effect this rule will have on matching traffic. + Currently the following actions are supported: + Allow: allows the selected traffic (even if it would otherwise have been denied by NetworkPolicy) + Deny: denies the selected traffic + Pass: instructs the selected traffic to skip any remaining ANP rules, and + then pass execution to any NetworkPolicies that select the pod. + If the pod is not selected by any NetworkPolicies then execution + is passed to any BaselineAdminNetworkPolicies that select the pod. + + + Support: Core + enum: + - Allow + - Deny + - Pass + type: string + name: + description: |- + Name is an identifier for this rule, that may be no more than 100 characters + in length. This field should be used by the implementation to help + improve observability, readability and error-reporting for any applied + AdminNetworkPolicies. + + + Support: Core + maxLength: 100 + type: string + ports: + description: |- + Ports allows for matching traffic based on port and protocols. + This field is a list of destination ports for the outgoing egress traffic. + If Ports is not set then the rule does not filter traffic via port. + + + Support: Core + items: + description: |- + AdminNetworkPolicyPort describes how to select network ports on pod(s). + Exactly one field must be set. + maxProperties: 1 + minProperties: 1 + properties: + namedPort: + description: |- + NamedPort selects a port on a pod(s) based on name. + + + Support: Extended + + + + type: string + portNumber: + description: |- + Port selects a port on a pod(s) based on number. + + + Support: Core + properties: + port: + description: |- + Number defines a network port value. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + default: TCP + description: |- + Protocol is the network protocol (TCP, UDP, or SCTP) which traffic must + match. If not specified, this field defaults to TCP. + + + Support: Core + type: string + required: + - port + - protocol + type: object + portRange: + description: |- + PortRange selects a port range on a pod(s) based on provided start and end + values. 
+ + + Support: Core + properties: + end: + description: |- + End defines a network port that is the end of a port range, the End value + must be greater than Start. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + default: TCP + description: |- + Protocol is the network protocol (TCP, UDP, or SCTP) which traffic must + match. If not specified, this field defaults to TCP. + + + Support: Core + type: string + start: + description: |- + Start defines a network port that is the start of a port range, the Start + value must be less than End. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - end + - start + type: object + type: object + maxItems: 100 + type: array + to: + description: |- + To is the List of destinations whose traffic this rule applies to. + If any AdminNetworkPolicyEgressPeer matches the destination of outgoing + traffic then the specified action is applied. + This field must be defined and contain at least one item. + + + Support: Core + items: + description: |- + AdminNetworkPolicyEgressPeer defines a peer to allow traffic to. + Exactly one of the selector pointers must be set for a given peer. If a + consumer observes none of its fields are set, they must assume an unknown + option has been specified and fail closed. + maxProperties: 1 + minProperties: 1 + properties: + namespaces: + description: |- + Namespaces defines a way to select all pods within a set of Namespaces. + Note that host-networked pods are not included in this type of peer. + + + Support: Core + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + networks: + description: |- + Networks defines a way to select peers via CIDR blocks. + This is intended for representing entities that live outside the cluster, + which can't be selected by pods, namespaces and nodes peers, but note + that cluster-internal traffic will be checked against the rule as + well. So if you Allow or Deny traffic to `"0.0.0.0/0"`, that will allow + or deny all IPv4 pod-to-pod traffic as well. If you don't want that, + add a rule that Passes all pod traffic before the Networks rule. 
+ + + Each item in Networks should be provided in the CIDR format and should be + IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + + + Networks can have upto 25 CIDRs specified. + + + Support: Extended + + + + items: + description: |- + CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). + This string must be validated by implementations using net.ParseCIDR + TODO: Introduce CEL CIDR validation regex isCIDR() in Kube 1.31 when it is available. + maxLength: 43 + type: string + x-kubernetes-validations: + - message: CIDR must be either an IPv4 or IPv6 address. IPv4 address embedded in IPv6 addresses are not supported + rule: self.contains(':') != self.contains('.') + maxItems: 25 + minItems: 1 + type: array + x-kubernetes-list-type: set + nodes: + description: |- + Nodes defines a way to select a set of nodes in + the cluster. This field follows standard label selector + semantics; if present but empty, it selects all Nodes. + + + Support: Extended + + + + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + pods: + description: |- + Pods defines a way to select a set of pods in + a set of namespaces. Note that host-networked pods + are not included in this type of peer. + + + Support: Core + properties: + namespaceSelector: + description: |- + NamespaceSelector follows standard label selector semantics; if empty, + it selects all Namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
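To make the Allow/Deny-to-"0.0.0.0/0" caveat above concrete, here is a sketch of an egress rule list that passes all pod-to-pod traffic before denying by CIDR; the rule names and selectors are hypothetical:

egress:
- name: pass-pod-traffic
  action: Pass              # evaluated first, so pod traffic skips the Deny below
  to:
  - pods:
      namespaceSelector: {}   # empty selectors match all namespaces/pods
      podSelector: {}
- name: deny-external
  action: Deny
  to:
  - networks:
    - 0.0.0.0/0             # without the Pass rule, this would also deny IPv4 pod-to-pod traffic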
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + PodSelector is used to explicitly select pods within a namespace; if empty, + it selects all Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - namespaceSelector + - podSelector + type: object + type: object + maxItems: 100 + minItems: 1 + type: array + required: + - action + - to + type: object + x-kubernetes-validations: + - message: networks/nodes peer cannot be set with namedPorts since there are no namedPorts for networks/nodes + rule: '!(self.to.exists(peer, has(peer.networks) || has(peer.nodes)) && has(self.ports) && self.ports.exists(port, has(port.namedPort)))' + maxItems: 100 + type: array + ingress: + description: |- + Ingress is the list of Ingress rules to be applied to the selected pods. + A total of 100 rules will be allowed in each ANP instance. + The relative precedence of ingress rules within a single ANP object (all of + which share the priority) will be determined by the order in which the rule + is written. Thus, a rule that appears at the top of the ingress rules + would take the highest precedence. + ANPs with no ingress rules do not affect ingress traffic. + + + Support: Core + items: + description: |- + AdminNetworkPolicyIngressRule describes an action to take on a particular + set of traffic destined for pods selected by an AdminNetworkPolicy's + Subject field. + properties: + action: + description: |- + Action specifies the effect this rule will have on matching traffic. 
+ Currently the following actions are supported: + Allow: allows the selected traffic (even if it would otherwise have been denied by NetworkPolicy) + Deny: denies the selected traffic + Pass: instructs the selected traffic to skip any remaining ANP rules, and + then pass execution to any NetworkPolicies that select the pod. + If the pod is not selected by any NetworkPolicies then execution + is passed to any BaselineAdminNetworkPolicies that select the pod. + + + Support: Core + enum: + - Allow + - Deny + - Pass + type: string + from: + description: |- + From is the list of sources whose traffic this rule applies to. + If any AdminNetworkPolicyIngressPeer matches the source of incoming + traffic then the specified action is applied. + This field must be defined and contain at least one item. + + + Support: Core + items: + description: |- + AdminNetworkPolicyIngressPeer defines an in-cluster peer to allow traffic from. + Exactly one of the selector pointers must be set for a given peer. If a + consumer observes none of its fields are set, they must assume an unknown + option has been specified and fail closed. + maxProperties: 1 + minProperties: 1 + properties: + namespaces: + description: |- + Namespaces defines a way to select all pods within a set of Namespaces. + Note that host-networked pods are not included in this type of peer. + + + Support: Core + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + pods: + description: |- + Pods defines a way to select a set of pods in + a set of namespaces. Note that host-networked pods + are not included in this type of peer. + + + Support: Core + properties: + namespaceSelector: + description: |- + NamespaceSelector follows standard label selector semantics; if empty, + it selects all Namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + PodSelector is used to explicitly select pods within a namespace; if empty, + it selects all Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - namespaceSelector + - podSelector + type: object + type: object + maxItems: 100 + minItems: 1 + type: array + name: + description: |- + Name is an identifier for this rule, that may be no more than 100 characters + in length. This field should be used by the implementation to help + improve observability, readability and error-reporting for any applied + AdminNetworkPolicies. + + + Support: Core + maxLength: 100 + type: string + ports: + description: |- + Ports allows for matching traffic based on port and protocols. + This field is a list of ports which should be matched on + the pods selected for this policy i.e the subject of the policy. + So it matches on the destination port for the ingress traffic. + If Ports is not set then the rule does not filter traffic via port. + + + Support: Core + items: + description: |- + AdminNetworkPolicyPort describes how to select network ports on pod(s). + Exactly one field must be set. + maxProperties: 1 + minProperties: 1 + properties: + namedPort: + description: |- + NamedPort selects a port on a pod(s) based on name. + + + Support: Extended + + + + type: string + portNumber: + description: |- + Port selects a port on a pod(s) based on number. 
+ + + Support: Core + properties: + port: + description: |- + Number defines a network port value. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + default: TCP + description: |- + Protocol is the network protocol (TCP, UDP, or SCTP) which traffic must + match. If not specified, this field defaults to TCP. + + + Support: Core + type: string + required: + - port + - protocol + type: object + portRange: + description: |- + PortRange selects a port range on a pod(s) based on provided start and end + values. + + + Support: Core + properties: + end: + description: |- + End defines a network port that is the end of a port range, the End value + must be greater than Start. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + default: TCP + description: |- + Protocol is the network protocol (TCP, UDP, or SCTP) which traffic must + match. If not specified, this field defaults to TCP. + + + Support: Core + type: string + start: + description: |- + Start defines a network port that is the start of a port range, the Start + value must be less than End. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - end + - start + type: object + type: object + maxItems: 100 + type: array + required: + - action + - from + type: object + maxItems: 100 + type: array + priority: + description: |- + Priority is a value from 0 to 1000. Rules with lower priority values have + higher precedence, and are checked before rules with higher priority values. + All AdminNetworkPolicy rules have higher precedence than NetworkPolicy or + BaselineAdminNetworkPolicy rules. + The behavior is undefined if two ANP objects have the same priority. + + + Support: Core + format: int32 + maximum: 1000 + minimum: 0 + type: integer + subject: + description: |- + Subject defines the pods to which this AdminNetworkPolicy applies. + Note that host-networked pods are not included in subject selection. + + + Support: Core + maxProperties: 1 + minProperties: 1 + properties: + namespaces: + description: Namespaces is used to select pods via namespace selectors. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed.
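+# To make the spec above concrete, the following commented-out sketch shows a
+# complete AdminNetworkPolicy using the priority, subject, and ingress fields
+# just described. It is illustrative only and is not applied with this manifest;
+# the object name, labels, and rule name are hypothetical.
+#
+#   apiVersion: policy.networking.k8s.io/v1alpha1
+#   kind: AdminNetworkPolicy
+#   metadata:
+#     name: sample-anp
+#   spec:
+#     priority: 10                 # 0-1000; lower values take precedence
+#     subject:
+#       namespaces:
+#         matchLabels:
+#           team: backend
+#     ingress:
+#     - name: allow-monitoring
+#       action: Allow              # one of Allow, Deny, Pass
+#       from:
+#       - namespaces:
+#           matchLabels:
+#             purpose: monitoring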
+ type: object + type: object + x-kubernetes-map-type: atomic + pods: + description: Pods is used to select pods via namespace AND pod selectors. + properties: + namespaceSelector: + description: |- + NamespaceSelector follows standard label selector semantics; if empty, + it selects all Namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + PodSelector is used to explicitly select pods within a namespace; if empty, + it selects all Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - namespaceSelector + - podSelector + type: object + type: object + required: + - priority + - subject + type: object + status: + description: Status is the status to be reported by the implementation. + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. 
For example,\n\n\n\ttype FooStatus struct{\n\t // Represents the observations of a foo's current state.\n\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + required: + - conditions + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/network-policy-api/pull/30 + policy.networking.k8s.io/bundle-version: v0.1.1 + policy.networking.k8s.io/channel: experimental + creationTimestamp: null + name: baselineadminnetworkpolicies.policy.networking.k8s.io +spec: + group: policy.networking.k8s.io + names: + kind: BaselineAdminNetworkPolicy + listKind: BaselineAdminNetworkPolicyList + plural: baselineadminnetworkpolicies + shortNames: + - banp + singular: baselineadminnetworkpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + BaselineAdminNetworkPolicy is a cluster level resource that is part of the + AdminNetworkPolicy API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Specification of the desired behavior of BaselineAdminNetworkPolicy. + properties: + egress: + description: |- + Egress is the list of Egress rules to be applied to the selected pods if + they are not matched by any AdminNetworkPolicy or NetworkPolicy rules. + A total of 100 Egress rules will be allowed in each BANP instance. + The relative precedence of egress rules within a single BANP object + will be determined by the order in which the rule is written. + Thus, a rule that appears at the top of the egress rules + would take the highest precedence. + BANPs with no egress rules do not affect egress traffic. + + + Support: Core + items: + description: |- + BaselineAdminNetworkPolicyEgressRule describes an action to take on a particular + set of traffic originating from pods selected by a BaselineAdminNetworkPolicy's + Subject field. + + properties: + action: + description: |- + Action specifies the effect this rule will have on matching traffic. 
+ Currently the following actions are supported: + Allow: allows the selected traffic + Deny: denies the selected traffic + + + Support: Core + enum: + - Allow + - Deny + type: string + name: + description: |- + Name is an identifier for this rule, that may be no more than 100 characters + in length. This field should be used by the implementation to help + improve observability, readability and error-reporting for any applied + BaselineAdminNetworkPolicies. + + + Support: Core + maxLength: 100 + type: string + ports: + description: |- + Ports allows for matching traffic based on port and protocols. + This field is a list of destination ports for the outgoing egress traffic. + If Ports is not set then the rule does not filter traffic via port. + items: + description: |- + AdminNetworkPolicyPort describes how to select network ports on pod(s). + Exactly one field must be set. + maxProperties: 1 + minProperties: 1 + properties: + namedPort: + description: |- + NamedPort selects a port on a pod(s) based on name. + + + Support: Extended + + + + type: string + portNumber: + description: |- + Port selects a port on a pod(s) based on number. + + + Support: Core + properties: + port: + description: |- + Number defines a network port value. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + default: TCP + description: |- + Protocol is the network protocol (TCP, UDP, or SCTP) which traffic must + match. If not specified, this field defaults to TCP. + + + Support: Core + type: string + required: + - port + - protocol + type: object + portRange: + description: |- + PortRange selects a port range on a pod(s) based on provided start and end + values. + + + Support: Core + properties: + end: + description: |- + End defines a network port that is the end of a port range, the End value + must be greater than Start. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + default: TCP + description: |- + Protocol is the network protocol (TCP, UDP, or SCTP) which traffic must + match. If not specified, this field defaults to TCP. + + + Support: Core + type: string + start: + description: |- + Start defines a network port that is the start of a port range, the Start + value must be less than End. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - end + - start + type: object + type: object + maxItems: 100 + type: array + to: + description: |- + To is the list of destinations whose traffic this rule applies to. + If any AdminNetworkPolicyEgressPeer matches the destination of outgoing + traffic then the specified action is applied. + This field must be defined and contain at least one item. + + + Support: Core + items: + description: |- + AdminNetworkPolicyEgressPeer defines a peer to allow traffic to. + Exactly one of the selector pointers must be set for a given peer. If a + consumer observes none of its fields are set, they must assume an unknown + option has been specified and fail closed. + maxProperties: 1 + minProperties: 1 + properties: + namespaces: + description: |- + Namespaces defines a way to select all pods within a set of Namespaces. + Note that host-networked pods are not included in this type of peer. + + + Support: Core + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
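+# For reference, the three one-of port selectors described above combine in a
+# rule's ports list as in this commented-out, purely illustrative fragment
+# (the port numbers and the port name are hypothetical):
+#
+#   ports:
+#   - portNumber:
+#       protocol: TCP
+#       port: 8080
+#   - portRange:
+#       protocol: UDP
+#       start: 30000
+#       end: 32767
+#   - namedPort: metrics           # Support: Extended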
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + networks: + description: |- + Networks defines a way to select peers via CIDR blocks. + This is intended for representing entities that live outside the cluster, + which can't be selected by pods, namespaces and nodes peers, but note + that cluster-internal traffic will be checked against the rule as + well. So if you Allow or Deny traffic to `"0.0.0.0/0"`, that will allow + or deny all IPv4 pod-to-pod traffic as well. If you don't want that, + add a rule that Passes all pod traffic before the Networks rule. + + + Each item in Networks should be provided in the CIDR format and should be + IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + + + Networks can have up to 25 CIDRs specified. + + + Support: Extended + + + + items: + description: |- + CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). + This string must be validated by implementations using net.ParseCIDR + TODO: Introduce CEL CIDR validation regex isCIDR() in Kube 1.31 when it is available. + maxLength: 43 + type: string + x-kubernetes-validations: + - message: CIDR must be either an IPv4 or IPv6 address. IPv4 addresses embedded in IPv6 addresses are not supported + rule: self.contains(':') != self.contains('.') + maxItems: 25 + minItems: 1 + type: array + x-kubernetes-list-type: set + nodes: + description: |- + Nodes defines a way to select a set of nodes in + the cluster. This field follows standard label selector + semantics; if present but empty, it selects all Nodes. + + + Support: Extended + + + + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch.
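+# The networks peer above selects destinations by CIDR. A commented-out,
+# illustrative BaselineAdminNetworkPolicy using it might look as follows; note
+# that a BANP must be named "default" (enforced by a validation rule later in
+# this CRD), and that broad CIDRs such as "0.0.0.0/0" also match pod-to-pod
+# traffic. The CIDRs here are hypothetical.
+#
+#   apiVersion: policy.networking.k8s.io/v1alpha1
+#   kind: BaselineAdminNetworkPolicy
+#   metadata:
+#     name: default
+#   spec:
+#     subject:
+#       namespaces: {}             # empty selector: all namespaces
+#     egress:
+#     - name: deny-private-ranges
+#       action: Deny               # BANP rules support Allow and Deny only
+#       to:
+#       - networks:
+#         - 10.0.0.0/8
+#         - fd00::/8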
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + pods: + description: |- + Pods defines a way to select a set of pods in + a set of namespaces. Note that host-networked pods + are not included in this type of peer. + + + Support: Core + properties: + namespaceSelector: + description: |- + NamespaceSelector follows standard label selector semantics; if empty, + it selects all Namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + PodSelector is used to explicitly select pods within a namespace; if empty, + it selects all Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - namespaceSelector + - podSelector + type: object + type: object + maxItems: 100 + minItems: 1 + type: array + required: + - action + - to + type: object + x-kubernetes-validations: + - message: networks/nodes peer cannot be set with namedPorts since there are no namedPorts for networks/nodes + rule: '!(self.to.exists(peer, has(peer.networks) || has(peer.nodes)) && has(self.ports) && self.ports.exists(port, has(port.namedPort)))' + maxItems: 100 + type: array + ingress: + description: |- + Ingress is the list of Ingress rules to be applied to the selected pods + if they are not matched by any AdminNetworkPolicy or NetworkPolicy rules. + A total of 100 Ingress rules will be allowed in each BANP instance. + The relative precedence of ingress rules within a single BANP object + will be determined by the order in which the rule is written. + Thus, a rule that appears at the top of the ingress rules + would take the highest precedence. + BANPs with no ingress rules do not affect ingress traffic. + + + Support: Core + items: + description: |- + BaselineAdminNetworkPolicyIngressRule describes an action to take on a particular + set of traffic destined for pods selected by a BaselineAdminNetworkPolicy's + Subject field. + properties: + action: + description: |- + Action specifies the effect this rule will have on matching traffic. + Currently the following actions are supported: + Allow: allows the selected traffic + Deny: denies the selected traffic + + + Support: Core + enum: + - Allow + - Deny + type: string + from: + description: |- + From is the list of sources whose traffic this rule applies to. + If any AdminNetworkPolicyIngressPeer matches the source of incoming + traffic then the specified action is applied. + This field must be defined and contain at least one item. + + + Support: Core + items: + description: |- + AdminNetworkPolicyIngressPeer defines an in-cluster peer to allow traffic from. + Exactly one of the selector pointers must be set for a given peer. If a + consumer observes none of its fields are set, they must assume an unknown + option has been specified and fail closed. + maxProperties: 1 + minProperties: 1 + properties: + namespaces: + description: |- + Namespaces defines a way to select all pods within a set of Namespaces. + Note that host-networked pods are not included in this type of peer. + + + Support: Core + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + pods: + description: |- + Pods defines a way to select a set of pods in + a set of namespaces. Note that host-networked pods + are not included in this type of peer. + + + Support: Core + properties: + namespaceSelector: + description: |- + NamespaceSelector follows standard label selector semantics; if empty, + it selects all Namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + PodSelector is used to explicitly select pods within a namespace; if empty, + it selects all Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
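+# A pods peer, as described above, pairs a namespaceSelector with a
+# podSelector, and both keys are required even when one selector is left empty
+# (an empty selector matches everything). Commented-out, illustrative
+# fragment; the labels are hypothetical:
+#
+#   from:
+#   - pods:
+#       namespaceSelector:
+#         matchLabels:
+#           env: prod
+#       podSelector: {}            # all pods in the selected namespaces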
+ type: object + type: object + x-kubernetes-map-type: atomic + required: + - namespaceSelector + - podSelector + type: object + type: object + maxItems: 100 + minItems: 1 + type: array + name: + description: |- + Name is an identifier for this rule, that may be no more than 100 characters + in length. This field should be used by the implementation to help + improve observability, readability and error-reporting for any applied + BaselineAdminNetworkPolicies. + + + Support: Core + maxLength: 100 + type: string + ports: + description: |- + Ports allows for matching traffic based on port and protocols. + This field is a list of ports which should be matched on + the pods selected for this policy i.e the subject of the policy. + So it matches on the destination port for the ingress traffic. + If Ports is not set then the rule does not filter traffic via port. + + + Support: Core + items: + description: |- + AdminNetworkPolicyPort describes how to select network ports on pod(s). + Exactly one field must be set. + maxProperties: 1 + minProperties: 1 + properties: + namedPort: + description: |- + NamedPort selects a port on a pod(s) based on name. + + + Support: Extended + + + + type: string + portNumber: + description: |- + Port selects a port on a pod(s) based on number. + + + Support: Core + properties: + port: + description: |- + Number defines a network port value. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + default: TCP + description: |- + Protocol is the network protocol (TCP, UDP, or SCTP) which traffic must + match. If not specified, this field defaults to TCP. + + + Support: Core + type: string + required: + - port + - protocol + type: object + portRange: + description: |- + PortRange selects a port range on a pod(s) based on provided start and end + values. + + + Support: Core + properties: + end: + description: |- + End defines a network port that is the end of a port range, the End value + must be greater than Start. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + default: TCP + description: |- + Protocol is the network protocol (TCP, UDP, or SCTP) which traffic must + match. If not specified, this field defaults to TCP. + + + Support: Core + type: string + start: + description: |- + Start defines a network port that is the start of a port range, the Start + value must be less than End. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - end + - start + type: object + type: object + maxItems: 100 + type: array + required: + - action + - from + type: object + maxItems: 100 + type: array + subject: + description: |- + Subject defines the pods to which this BaselineAdminNetworkPolicy applies. + Note that host-networked pods are not included in subject selection. + + + Support: Core + maxProperties: 1 + minProperties: 1 + properties: + namespaces: + description: Namespaces is used to select pods via namespace selectors. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + pods: + description: Pods is used to select pods via namespace AND pod selectors. + properties: + namespaceSelector: + description: |- + NamespaceSelector follows standard label selector semantics; if empty, + it selects all Namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + PodSelector is used to explicitly select pods within a namespace; if empty, + it selects all Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
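+# Alongside matchLabels, the matchExpressions form shown above supports
+# set-based selection. A commented-out, illustrative subject that excludes
+# kube-system might read as follows (the key is the well-known namespace-name
+# label; the excluded value is an example):
+#
+#   subject:
+#     namespaces:
+#       matchExpressions:
+#       - key: kubernetes.io/metadata.name
+#         operator: NotIn
+#         values: [kube-system]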
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - namespaceSelector + - podSelector + type: object + type: object + required: + - subject + type: object + status: + description: Status is the status to be reported by the implementation. + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\n\n\n\ttype FooStatus struct{\n\t // Represents the observations of a foo's current state.\n\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + required: + - conditions + type: object + required: + - metadata + - spec + type: object + x-kubernetes-validations: + - message: Only one baseline admin network policy with metadata.name="default" can be created in the cluster + rule: self.metadata.name == 'default' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: +# Nodes are watched to monitor for deletions. +- apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get +# Pods are watched to check for existence as part of IPAM controller. +- apiGroups: [""] + resources: + - pods + verbs: + - get + - list + - watch +# Services are monitored for service LoadBalancer IP allocation +- apiGroups: [""] + resources: + - services + - services/status + verbs: + - get + - list + - update + - watch +# IPAM resources are manipulated in response to node and block updates, as well as periodic triggers. +- apiGroups: ["crd.projectcalico.org"] + resources: + - ipreservations + verbs: + - list +- apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + - ipamconfigs + - tiers + verbs: + - get + - list + - create + - update + - delete + - watch +# Pools are watched to maintain a mapping of blocks to IP pools. +- apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - watch +# kube-controllers manages hostendpoints. +- apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + - watch +# Needs access to update clusterinformations. +- apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - list + - create + - update + - watch +# KubeControllersConfiguration is where it gets its config +- apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + - list + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch +--- +# Source: calico/templates/calico-node-rbac.yaml +# Include a clusterrole for the calico-node DaemonSet, # and bind it to the calico-node serviceaccount. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-node rules: - # Used for creating service account tokens to be used by the CNI plugin - - apiGroups: [""] - resources: - - serviceaccounts/token - resourceNames: - - calico-node - verbs: - - create - # The CNI plugin needs to get pods, nodes, and namespaces. - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: - - get - # EndpointSlices are used for Service-based network policy rule - # enforcement. 
- - apiGroups: ["discovery.k8s.io"] - resources: - - endpointslices - verbs: - - watch - - list - - apiGroups: [""] - resources: - - endpoints - - services - verbs: - # Used to discover service IPs for advertisement. - - watch - - list - # Used to discover Typhas. - - get - # Pod CIDR auto-detection on kubeadm needs access to config maps. - - apiGroups: [""] - resources: - - configmaps - verbs: - - get - - apiGroups: [""] - resources: - - nodes/status - verbs: - # Needed for clearing NodeNetworkUnavailable flag. - - patch - # Calico stores some configuration information in node annotations. - - update - # Watch for changes to Kubernetes NetworkPolicies. - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - watch - - list - # Used by Calico for policy information. - - apiGroups: [""] - resources: - - pods - - namespaces - - serviceaccounts - verbs: - - list - - watch - # The CNI plugin patches pods/status. - - apiGroups: [""] - resources: - - pods/status - verbs: - - patch - # Calico monitors various CRDs for config. - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - bgpconfigurations - - ippools - - ipreservations - - ipamblocks - - globalnetworkpolicies - - globalnetworksets - - networkpolicies - - networksets - - clusterinformations - - hostendpoints - - blockaffinities - - caliconodestatuses - verbs: - - get - - list - - watch - # Calico must create and update some CRDs on startup. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - - felixconfigurations - - clusterinformations - verbs: - - create - - update - # Calico must update some CRDs. - - apiGroups: [ "crd.projectcalico.org" ] - resources: - - caliconodestatuses - verbs: - - update - # Calico stores some configuration information on the node. - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - watch - # These permissions are only required for upgrade from v2.6, and can - # be removed after upgrade or on fresh installations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - bgpconfigurations - - bgppeers - verbs: - - create - - update - # These permissions are required for Calico CNI to perform IPAM allocations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - verbs: - - get - - list - - create - - update - - delete - - apiGroups: ["crd.projectcalico.org"] - resources: - - ipamconfigs - verbs: - - get - # Block affinities must also be watchable by confd for route aggregation. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - verbs: - - watch - # The Calico IPAM migration needs to get daemonsets. These permissions can be - # removed if not upgrading from an installation using host-local IPAM. - - apiGroups: ["apps"] - resources: - - daemonsets - verbs: - - get - +# Used for creating service account tokens to be used by the CNI plugin +- apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-cni-plugin + verbs: + - create +# The CNI plugin needs to get pods, nodes, and namespaces. +- apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get +# EndpointSlices are used for Service-based network policy rule +# enforcement. +- apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - watch + - list +- apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. 
+ - watch + - list + # Used to discover Typhas. + - get +# Pod CIDR auto-detection on kubeadm needs access to config maps. +- apiGroups: [""] + resources: + - configmaps + verbs: + - get +- apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update +# Watch for changes to Kubernetes NetworkPolicies. +- apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list +# Watch for changes to Kubernetes (Baseline)AdminNetworkPolicies. +- apiGroups: ["policy.networking.k8s.io"] + resources: + - adminnetworkpolicies + - baselineadminnetworkpolicies + verbs: + - watch + - list +# Used by Calico for policy information. +- apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch +# The CNI plugin patches pods/status. +- apiGroups: [""] + resources: + - pods/status + verbs: + - patch +# Calico monitors various CRDs for config. +- apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - bgpfilters + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - stagedglobalnetworkpolicies + - networkpolicies + - stagednetworkpolicies + - stagedkubernetesnetworkpolicies + - globalnetworksets + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + - tiers + verbs: + - get + - list + - watch + # Calico creates some tiers on startup. +- apiGroups: ["crd.projectcalico.org"] + resources: + - tiers + verbs: + - create +# Calico must create and update some CRDs on startup. +- apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update +# Calico must update some CRDs. +- apiGroups: ["crd.projectcalico.org"] + resources: + - caliconodestatuses + verbs: + - update +# Calico stores some configuration information on the node. +- apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch +# These permissions are only required for upgrade from v2.6, and can +# be removed after upgrade or on fresh installations. +- apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update +# These permissions are required for Calico CNI to perform IPAM allocations. +- apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete +# The CNI plugin and calico/node need to be able to create a default +# IPAMConfiguration +- apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + - create +# Block affinities must also be watchable by confd for route aggregation. +- apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch +# The Calico IPAM migration needs to get daemonsets. These permissions can be +# removed if not upgrading from an installation using host-local IPAM. 
+- apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get +--- +# Source: calico/templates/calico-node-rbac.yaml +# CNI cluster role +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-cni-plugin +rules: +- apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get +- apiGroups: [""] + resources: + - pods/status + verbs: + - patch +- apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + - clusterinformations + - ippools + - ipreservations + - ipamconfigs + verbs: + - get + - list + - create + - update + - delete +--- +# Source: calico/templates/tier-getter.yaml +# Implements the necessary permissions for the kube-controller-manager to interact with +# Tiers and Tiered Policies for GC. +# +# https://github.com/tigera/operator/blob/v1.37.0/pkg/render/apiserver.go#L1505-L1545 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-tier-getter +rules: +- apiGroups: + - "projectcalico.org" + resources: + - "tiers" + verbs: + - "get" +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system --- +# Source: calico/templates/calico-node-rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -4261,7 +9123,34 @@ subjects: - kind: ServiceAccount name: calico-node namespace: kube-system - +--- +# Source: calico/templates/calico-node-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-cni-plugin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-cni-plugin +subjects: +- kind: ServiceAccount + name: calico-cni-plugin + namespace: kube-system +--- +# Source: calico/templates/tier-getter.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-tier-getter +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-tier-getter +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:kube-controller-manager --- # Source: calico/templates/calico-node.yaml # This manifest installs the calico-node container, as well @@ -4291,303 +9180,305 @@ spec: kubernetes.io/os: linux hostNetwork: true tolerations: - # Make sure calico-node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists + # Make sure calico-node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists serviceAccountName: calico-node + securityContext: + seccompProfile: + type: RuntimeDefault # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. terminationGracePeriodSeconds: 0 priorityClassName: system-node-critical initContainers: - # This container performs upgrade from host-local IPAM to calico-ipam. 
- # It can be deleted if this is a fresh installation, or if you have already - # upgraded to use calico-ipam. - - name: upgrade-ipam - image: quay.io/calico/cni:v3.23.3 - command: ["/opt/cni/bin/calico-ipam", "-upgrade"] - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - volumeMounts: - - mountPath: /var/lib/cni/networks - name: host-local-net-dir - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - securityContext: - privileged: true - # This container installs the CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v3.23.3 - command: ["/opt/cni/bin/install"] - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Prevents the container from sleeping forever. - - name: SLEEP - value: "false" - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - securityContext: - privileged: true - # This init container mounts the necessary filesystems needed by the BPF data plane - # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed - # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. - - name: "mount-bpffs" - image: quay.io/calico/node:v3.23.3 - command: ["calico-node", "-init", "-best-effort"] - volumeMounts: - - mountPath: /sys/fs - name: sys-fs - # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host - # so that it outlives the init container. - mountPropagation: Bidirectional - - mountPath: /var/run/calico - name: var-run-calico - # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host - # so that it outlives the init container. - mountPropagation: Bidirectional - # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary, - # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly. - - mountPath: /nodeproc - name: nodeproc - readOnly: true - securityContext: - privileged: true + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: docker.io/calico/cni:v3.30.2 + imagePullPolicy: IfNotPresent + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. 
+ name: kubernetes-services-endpoint + optional: true + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: docker.io/calico/cni:v3.30.2 + imagePullPolicy: IfNotPresent + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # This init container mounts the necessary filesystems needed by the BPF data plane + # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed + # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. + - name: "mount-bpffs" + image: docker.io/calico/node:v3.30.2 + imagePullPolicy: IfNotPresent + command: ["calico-node", "-init", "-best-effort"] + volumeMounts: + - mountPath: /sys/fs + name: sys-fs + # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + - mountPath: /var/run/calico + name: var-run-calico + # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary, + # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly. + - mountPath: /nodeproc + name: nodeproc + readOnly: true + securityContext: + privileged: true containers: - # Runs calico-node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v3.23.3 - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Choose the backend to use. 
- - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,bgp" - # Auto-detect the BGP IP address. - - name: IP - value: "autodetect" - # Enable IPIP - - name: CALICO_IPV4POOL_IPIP - value: "Always" - # Enable or Disable VXLAN on the default IP pool. - - name: CALICO_IPV4POOL_VXLAN - value: "Never" - # Enable or Disable VXLAN on the default IPv6 IP pool. - - name: CALICO_IPV6POOL_VXLAN - value: "Never" - # Set MTU for tunnel device used if ipip is enabled - - name: FELIX_IPINIPMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Set MTU for the VXLAN tunnel device. - - name: FELIX_VXLANMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Set MTU for the Wireguard tunnel device. - - name: FELIX_WIREGUARDMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within `--cluster-cidr`. - # - name: CALICO_IPV4POOL_CIDR - # value: "192.168.0.0/16" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - lifecycle: - preStop: - exec: - command: - - /bin/calico-node - - -shutdown - livenessProbe: + # Runs calico-node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: docker.io/calico/node:v3.30.2 + imagePullPolicy: IfNotPresent + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Auto-detect the BGP IP address. + - name: IP + value: "autodetect" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "Always" + # Enable or Disable VXLAN on the default IP pool. + - name: CALICO_IPV4POOL_VXLAN + value: "Never" + # Enable or Disable VXLAN on the default IPv6 IP pool. + - name: CALICO_IPV6POOL_VXLAN + value: "Never" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the Wireguard tunnel device. + - name: FELIX_WIREGUARDMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # The default IPv4 pool to create on startup if none exists. 
Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + lifecycle: + preStop: exec: command: - /bin/calico-node - - -felix-live - - -bird-live - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - timeoutSeconds: 10 - readinessProbe: - exec: - command: - - /bin/calico-node - - -felix-ready - - -bird-ready - periodSeconds: 10 - timeoutSeconds: 10 - volumeMounts: - # For maintaining CNI plugin API credentials. - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - readOnly: false - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - - name: policysync - mountPath: /var/run/nodeagent - # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the - # parent directory. - - name: bpffs - mountPath: /sys/fs/bpf - - name: cni-log-dir - mountPath: /var/log/calico/cni - readOnly: true - volumes: - # Used by calico-node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - - name: sys-fs - hostPath: - path: /sys/fs/ - type: DirectoryOrCreate + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + timeoutSeconds: 10 + volumeMounts: + # For maintaining CNI plugin API credentials. + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the + # parent directory. - name: bpffs - hostPath: - path: /sys/fs/bpf - type: Directory - # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs. - - name: nodeproc - hostPath: - path: /proc - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used to access CNI logs. + mountPath: /sys/fs/bpf - name: cni-log-dir - hostPath: - path: /var/log/calico/cni - # Mount in the directory for host-local IPAM allocations. 
This is - # used when upgrading from host-local to calico-ipam, and can be removed - # if not using the upgrade-ipam init container. - - name: host-local-net-dir - hostPath: - path: /var/lib/cni/networks - # Used to create per-pod Unix Domain Sockets - - name: policysync - hostPath: - type: DirectoryOrCreate - path: /var/run/nodeagent ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system - + mountPath: /var/log/calico/cni + readOnly: true + volumes: + # Used by calico-node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + type: DirectoryOrCreate + - name: var-lib-calico + hostPath: + path: /var/lib/calico + type: DirectoryOrCreate + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: sys-fs + hostPath: + path: /sys/fs/ + type: DirectoryOrCreate + - name: bpffs + hostPath: + path: /sys/fs/bpf + type: Directory + # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs. + - name: nodeproc + hostPath: + path: /proc + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used to access CNI logs. + - name: cni-log-dir + hostPath: + path: /var/log/calico/cni + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent --- # Source: calico/templates/calico-kube-controllers.yaml # See https://github.com/projectcalico/kube-controllers @@ -4616,70 +9507,42 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/master - effect: NoSchedule + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule serviceAccountName: calico-kube-controllers + securityContext: + seccompProfile: + type: RuntimeDefault priorityClassName: system-cluster-critical containers: - - name: calico-kube-controllers - image: quay.io/calico/kube-controllers:v3.23.3 - env: - # Choose which controllers to run. 
- - name: ENABLED_CONTROLLERS - value: node - - name: DATASTORE_TYPE - value: kubernetes - livenessProbe: - exec: - command: - - /usr/bin/check-status - - -l - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - timeoutSeconds: 10 - readinessProbe: - exec: - command: - - /usr/bin/check-status - - -r - periodSeconds: 10 - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-kube-controllers - namespace: kube-system - ---- - -# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict - -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers -spec: - maxUnavailable: 1 - selector: - matchLabels: - k8s-app: calico-kube-controllers - ---- -# Source: calico/templates/calico-etcd-secrets.yaml - ---- -# Source: calico/templates/calico-typha.yaml - ---- -# Source: calico/templates/configure-canal.yaml - - + - name: calico-kube-controllers + image: docker.io/calico/kube-controllers:v3.30.2 + imagePullPolicy: IfNotPresent + env: + # Choose which controllers to run. + - name: ENABLED_CONTROLLERS + value: node,loadbalancer + - name: DATASTORE_TYPE + value: kubernetes + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 + securityContext: + runAsNonRoot: true diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md index f95a504fe7..fabe5e43dc 100644 --- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -1,5 +1,31 @@ # Changelog +## 3.4.0 (2025-06-27) + +### Added + +- #268: Added property to Constraints to include prereleases for Check and Validate + +### Changed + +- #263: Updated Go testing for 1.24, 1.23, and 1.22 +- #269: Updated the error message handling for message case and wrapping errors +- #266: Restore the ability to have leading 0's when parsing with NewVersion. + Opt-out of this by setting CoerceNewVersion to false. + +### Fixed + +- #257: Fixed the CodeQL link (thanks @dmitris) +- #262: Restored detailed errors when failed to parse with NewVersion. Opt-out + of this by setting DetailedNewVersionErrors to false for faster performance. +- #267: Handle pre-releases for an "and" group if one constraint includes them + +## 3.3.1 (2024-11-19) + +### Fixed + +- #253: Fix for allowing some version that were invalid + ## 3.3.0 (2024-08-27) ### Added @@ -137,7 +163,7 @@ functions. These are described in the added and changed sections below. 
- #78: Fix unchecked error in example code (thanks @ravron) - #70: Fix the handling of pre-releases and the 0.0.0 release edge case - #97: Fixed copyright file for proper display on GitHub -- #107: Fix handling prerelease when sorting alphanum and num +- #107: Fix handling prerelease when sorting alphanum and num - #109: Fixed where Validate sometimes returns wrong message on error ## 1.4.2 (2018-04-10) diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md index ed56936084..2f56c676a5 100644 --- a/vendor/github.com/Masterminds/semver/v3/README.md +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -50,6 +50,18 @@ other versions, convert the version back into a string, and get the original string. Getting the original string is useful if the semantic version was coerced into a valid form. +There are package level variables that affect how `NewVersion` handles parsing. + +- `CoerceNewVersion` is `true` by default. When set to `true` it coerces non-compliant + versions into SemVer. For example, allowing a leading 0 in a major, minor, or patch + part. This enables the use of CalVer in versions even when not compliant with SemVer. + When set to `false` less coercion work is done. +- `DetailedNewVersionErrors` provides more detailed errors. It only has an affect when + `CoerceNewVersion` is set to `false`. When `DetailedNewVersionErrors` is set to `true` + it can provide some more insight into why a version is invalid. Setting + `DetailedNewVersionErrors` to `false` is faster on performance but provides less + detailed error messages if a version fails to parse. + ## Sorting Semantic Versions A set of versions can be sorted using the `sort` package from the standard library. @@ -160,6 +172,10 @@ means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case sensitivity doesn't apply here. This is due to ASCII sort ordering which is what the spec specifies. +The `Constraints` instance returned from `semver.NewConstraint()` has a property +`IncludePrerelease` that, when set to true, will return prerelease versions when calls +to `Check()` and `Validate()` are made. + ### Hyphen Range Comparisons There are multiple methods to handle ranges and the first is hyphens ranges. @@ -250,7 +266,7 @@ or [create a pull request](https://github.com/Masterminds/semver/pulls). Security is an important consideration for this project. The project currently uses the following tools to help discover security issues: -* [CodeQL](https://github.com/Masterminds/semver) +* [CodeQL](https://codeql.github.com) * [gosec](https://github.com/securego/gosec) * Daily Fuzz testing diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go index 8461c7ed90..8b7a10f836 100644 --- a/vendor/github.com/Masterminds/semver/v3/constraints.go +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -12,6 +12,13 @@ import ( // checked against. type Constraints struct { constraints [][]*constraint + containsPre []bool + + // IncludePrerelease specifies if pre-releases should be included in + // the results. Note, if a constraint range has a prerelease than + // prereleases will be included for that AND group even if this is + // set to false. 
+ IncludePrerelease bool } // NewConstraint returns a Constraints instance that a Version instance can @@ -22,11 +29,10 @@ func NewConstraint(c string) (*Constraints, error) { c = rewriteRange(c) ors := strings.Split(c, "||") - or := make([][]*constraint, len(ors)) + lenors := len(ors) + or := make([][]*constraint, lenors) + hasPre := make([]bool, lenors) for k, v := range ors { - - // TODO: Find a way to validate and fetch all the constraints in a simpler form - // Validate the segment if !validConstraintRegex.MatchString(v) { return nil, fmt.Errorf("improper constraint: %s", v) @@ -43,12 +49,22 @@ func NewConstraint(c string) (*Constraints, error) { return nil, err } + // If one of the constraints has a prerelease record this. + // This information is used when checking all in an "and" + // group to ensure they all check for prereleases. + if pc.con.pre != "" { + hasPre[k] = true + } + result[i] = pc } or[k] = result } - o := &Constraints{constraints: or} + o := &Constraints{ + constraints: or, + containsPre: hasPre, + } return o, nil } @@ -57,10 +73,10 @@ func (cs Constraints) Check(v *Version) bool { // TODO(mattfarina): For v4 of this library consolidate the Check and Validate // functions as the underlying functions make that possible now. // loop over the ORs and check the inner ANDs - for _, o := range cs.constraints { + for i, o := range cs.constraints { joy := true for _, c := range o { - if check, _ := c.check(v); !check { + if check, _ := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); !check { joy = false break } @@ -83,12 +99,12 @@ func (cs Constraints) Validate(v *Version) (bool, []error) { // Capture the prerelease message only once. When it happens the first time // this var is marked var prerelesase bool - for _, o := range cs.constraints { + for i, o := range cs.constraints { joy := true for _, c := range o { // Before running the check handle the case there the version is // a prerelease and the check is not searching for prereleases. - if c.con.pre == "" && v.pre != "" { + if !(cs.IncludePrerelease || cs.containsPre[i]) && v.pre != "" { if !prerelesase { em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) e = append(e, em) @@ -98,7 +114,7 @@ func (cs Constraints) Validate(v *Version) (bool, []error) { } else { - if _, err := c.check(v); err != nil { + if _, err := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); err != nil { e = append(e, err) joy = false } @@ -227,8 +243,8 @@ type constraint struct { } // Check if a version meets the constraint -func (c *constraint) check(v *Version) (bool, error) { - return constraintOps[c.origfunc](v, c) +func (c *constraint) check(v *Version, includePre bool) (bool, error) { + return constraintOps[c.origfunc](v, c, includePre) } // String prints an individual constraint into a string @@ -236,7 +252,7 @@ func (c *constraint) string() string { return c.origfunc + c.orig } -type cfunc func(v *Version, c *constraint) (bool, error) +type cfunc func(v *Version, c *constraint, includePre bool) (bool, error) func parseConstraint(c string) (*constraint, error) { if len(c) > 0 { @@ -272,7 +288,7 @@ func parseConstraint(c string) (*constraint, error) { // The constraintRegex should catch any regex parsing errors. So, // we should never get here. 
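For readers following the constraints.go changes above: a minimal sketch of the new opt-in behavior, assuming only the APIs visible in this hunk (NewConstraint, Check) plus the IncludePrerelease field and the existing MustParse helper.

package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	c, err := semver.NewConstraint(">= 1.2.3")
	if err != nil {
		panic(err)
	}
	v := semver.MustParse("1.3.0-beta.1")

	// Release-only constraints reject prerelease versions by default.
	fmt.Println(c.Check(v)) // false

	// Opting in makes Check and Validate consider prereleases for every
	// AND group, alongside the per-group containsPre bookkeeping above.
	c.IncludePrerelease = true
	fmt.Println(c.Check(v)) // true
}
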
- return nil, errors.New("constraint Parser Error") + return nil, errors.New("constraint parser error") } cs.con = con @@ -290,7 +306,7 @@ func parseConstraint(c string) (*constraint, error) { // The constraintRegex should catch any regex parsing errors. So, // we should never get here. - return nil, errors.New("constraint Parser Error") + return nil, errors.New("constraint parser error") } cs := &constraint{ @@ -305,16 +321,14 @@ func parseConstraint(c string) (*constraint, error) { } // Constraint functions -func constraintNotEqual(v *Version, c *constraint) (bool, error) { - if c.dirty { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } +func constraintNotEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + if c.dirty { if c.con.Major() != v.Major() { return true, nil } @@ -345,12 +359,11 @@ func constraintNotEqual(v *Version, c *constraint) (bool, error) { return true, nil } -func constraintGreaterThan(v *Version, c *constraint) (bool, error) { +func constraintGreaterThan(v *Version, c *constraint, includePre bool) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -391,11 +404,10 @@ func constraintGreaterThan(v *Version, c *constraint) (bool, error) { return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) } -func constraintLessThan(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintLessThan(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
+ if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -406,12 +418,11 @@ func constraintLessThan(v *Version, c *constraint) (bool, error) { return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) } -func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { +func constraintGreaterThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -422,11 +433,10 @@ func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { return false, fmt.Errorf("%s is less than %s", v, c.orig) } -func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintLessThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -455,11 +465,10 @@ func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { // ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 // ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 // ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 -func constraintTilde(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintTilde(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -487,16 +496,15 @@ func constraintTilde(v *Version, c *constraint) (bool, error) { // When there is a .x (dirty) status it automatically opts in to ~. Otherwise // it's a straight = -func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintTildeOrEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
+ if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } if c.dirty { - return constraintTilde(v, c) + return constraintTilde(v, c, includePre) } eq := v.Equal(c.con) @@ -516,11 +524,10 @@ func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { // ^0.0.3 --> >=0.0.3 <0.0.4 // ^0.0 --> >=0.0.0 <0.1.0 // ^0 --> >=0.0.0 <1.0.0 -func constraintCaret(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintCaret(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go index ff499fb664..7a3ba73887 100644 --- a/vendor/github.com/Masterminds/semver/v3/version.go +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -14,32 +14,52 @@ import ( // The compiled version of the regex created at init() is cached here so it // only needs to be created once. var versionRegex *regexp.Regexp +var looseVersionRegex *regexp.Regexp + +// CoerceNewVersion sets if leading 0's are allowd in the version part. Leading 0's are +// not allowed in a valid semantic version. When set to true, NewVersion will coerce +// leading 0's into a valid version. +var CoerceNewVersion = true + +// DetailedNewVersionErrors specifies if detailed errors are returned from the NewVersion +// function. This is used when CoerceNewVersion is set to false. If set to false +// ErrInvalidSemVer is returned for an invalid version. This does not apply to +// StrictNewVersion. Setting this function to false returns errors more quickly. +var DetailedNewVersionErrors = true var ( // ErrInvalidSemVer is returned a version is found to be invalid when // being parsed. - ErrInvalidSemVer = errors.New("Invalid Semantic Version") + ErrInvalidSemVer = errors.New("invalid semantic version") // ErrEmptyString is returned when an empty string is passed in for parsing. - ErrEmptyString = errors.New("Version string empty") + ErrEmptyString = errors.New("version string empty") // ErrInvalidCharacters is returned when invalid characters are found as // part of a version - ErrInvalidCharacters = errors.New("Invalid characters in version") + ErrInvalidCharacters = errors.New("invalid characters in version") // ErrSegmentStartsZero is returned when a version segment starts with 0. // This is invalid in SemVer. 
- ErrSegmentStartsZero = errors.New("Version segment starts with 0") + ErrSegmentStartsZero = errors.New("version segment starts with 0") // ErrInvalidMetadata is returned when the metadata is an invalid format - ErrInvalidMetadata = errors.New("Invalid Metadata string") + ErrInvalidMetadata = errors.New("invalid metadata string") // ErrInvalidPrerelease is returned when the pre-release is an invalid format - ErrInvalidPrerelease = errors.New("Invalid Prerelease string") + ErrInvalidPrerelease = errors.New("invalid prerelease string") ) // semVerRegex is the regular expression used to parse a semantic version. -const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + +// This is not the official regex from the semver spec. It has been modified to allow for loose handling +// where versions like 2.1 are detected. +const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` + + `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` + + `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?` + +// looseSemVerRegex is a regular expression that lets invalid semver expressions through +// with enough detail that certain errors can be checked for. +const looseSemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` @@ -53,6 +73,7 @@ type Version struct { func init() { versionRegex = regexp.MustCompile("^" + semVerRegex + "$") + looseVersionRegex = regexp.MustCompile("^" + looseSemVerRegex + "$") } const ( @@ -140,7 +161,80 @@ func StrictNewVersion(v string) (*Version, error) { // attempts to convert it to SemVer. If you want to validate it was a strict // semantic version at parse time see StrictNewVersion(). func NewVersion(v string) (*Version, error) { + if CoerceNewVersion { + return coerceNewVersion(v) + } m := versionRegex.FindStringSubmatch(v) + if m == nil { + + // Disabling detailed errors is first so that it is in the fast path. + if !DetailedNewVersionErrors { + return nil, ErrInvalidSemVer + } + + // Check for specific errors with the semver string and return a more detailed + // error. + m = looseVersionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + err := validateVersion(m) + if err != nil { + return nil, err + } + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[5], + pre: m[4], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(m[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(m[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. 
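The two package-level knobs introduced above can be exercised as follows; a minimal sketch, with the expected error text taken from the sentinel values in this hunk and a CalVer-style input chosen purely for illustration.

package main

import (
	"errors"
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// CoerceNewVersion defaults to true: a leading zero in a segment is
	// coerced instead of rejected, which keeps CalVer-style input working.
	v, err := semver.NewVersion("2024.01.2")
	fmt.Println(v != nil, err) // true <nil>

	// With coercion off, the strict regex rejects the string and the loose
	// regex is consulted only to build a detailed error.
	semver.CoerceNewVersion = false
	_, err = semver.NewVersion("2024.01.2")
	fmt.Println(err) // version segment starts with 0

	// Disabling detailed errors keeps the fast path and returns the sentinel.
	semver.DetailedNewVersionErrors = false
	_, err = semver.NewVersion("2024.01.2")
	fmt.Println(errors.Is(err, semver.ErrInvalidSemVer)) // true
}
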
+ + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +func coerceNewVersion(v string) (*Version, error) { + m := looseVersionRegex.FindStringSubmatch(v) if m == nil { return nil, ErrInvalidSemVer } @@ -154,13 +248,13 @@ func NewVersion(v string) (*Version, error) { var err error sv.major, err = strconv.ParseUint(m[1], 10, 64) if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) + return nil, fmt.Errorf("error parsing version segment: %w", err) } if m[2] != "" { sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) + return nil, fmt.Errorf("error parsing version segment: %w", err) } } else { sv.minor = 0 @@ -169,7 +263,7 @@ func NewVersion(v string) (*Version, error) { if m[3] != "" { sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) + return nil, fmt.Errorf("error parsing version segment: %w", err) } } else { sv.patch = 0 @@ -612,7 +706,9 @@ func containsOnly(s string, comp string) bool { func validatePrerelease(p string) error { eparts := strings.Split(p, ".") for _, p := range eparts { - if containsOnly(p, num) { + if p == "" { + return ErrInvalidPrerelease + } else if containsOnly(p, num) { if len(p) > 1 && p[0] == '0' { return ErrSegmentStartsZero } @@ -631,9 +727,62 @@ func validatePrerelease(p string) error { func validateMetadata(m string) error { eparts := strings.Split(m, ".") for _, p := range eparts { - if !containsOnly(p, allowed) { + if p == "" { return ErrInvalidMetadata + } else if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} + +// validateVersion checks for common validation issues but may not catch all errors +func validateVersion(m []string) error { + var err error + var v string + if m[1] != "" { + if len(m[1]) > 1 && m[1][0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) } } + + if m[2] != "" { + v = strings.TrimPrefix(m[2], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[3] != "" { + v = strings.TrimPrefix(m[3], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[5] != "" { + if err = validatePrerelease(m[5]); err != nil { + return err + } + } + + if m[8] != "" { + if err = validateMetadata(m[8]); err != nil { + return err + } + } + return nil } diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s index b7723185b6..ce9f062894 100644 --- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s @@ -1,4 +1,5 @@ -// +build amd64 +//go:build amd64 && !purego +// +build amd64,!purego #include "textflag.h" diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s index 
810aa9e648..ed33ba3d03 100644 --- a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s @@ -1,4 +1,5 @@ -// +build amd64 +//go:build amd64 && !purego +// +build amd64,!purego #include "textflag.h" diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go index 5a939100d2..1f165141a9 100644 --- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go @@ -18,6 +18,9 @@ func (Curve) Identity() *Point { func (Curve) IsOnCurve(P *Point) bool { x2, y2, t, t2, z2 := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{} rhs, lhs := &fp.Elt{}, &fp.Elt{} + // Check z != 0 + eq0 := !fp.IsZero(&P.z) + fp.Mul(t, &P.ta, &P.tb) // t = ta*tb fp.Sqr(x2, &P.x) // x^2 fp.Sqr(y2, &P.y) // y^2 @@ -27,13 +30,14 @@ func (Curve) IsOnCurve(P *Point) bool { fp.Mul(rhs, t2, ¶mD) // dt^2 fp.Add(rhs, rhs, z2) // z^2 + dt^2 fp.Sub(lhs, lhs, rhs) // x^2 + y^2 - (z^2 + dt^2) - eq0 := fp.IsZero(lhs) + eq1 := fp.IsZero(lhs) fp.Mul(lhs, &P.x, &P.y) // xy fp.Mul(rhs, t, &P.z) // tz fp.Sub(lhs, lhs, rhs) // xy - tz - eq1 := fp.IsZero(lhs) - return eq0 && eq1 + eq2 := fp.IsZero(lhs) + + return eq0 && eq1 && eq2 } // Generator returns the generator point. diff --git a/vendor/github.com/cloudflare/circl/internal/conv/conv.go b/vendor/github.com/cloudflare/circl/internal/conv/conv.go index 649a8e931d..3fd0df496f 100644 --- a/vendor/github.com/cloudflare/circl/internal/conv/conv.go +++ b/vendor/github.com/cloudflare/circl/internal/conv/conv.go @@ -5,6 +5,8 @@ import ( "fmt" "math/big" "strings" + + "golang.org/x/crypto/cryptobyte" ) // BytesLe2Hex returns an hexadecimal string of a number stored in a @@ -138,3 +140,34 @@ func BigInt2Uint64Le(z []uint64, x *big.Int) { z[i] = 0 } } + +// MarshalBinary encodes a value into a byte array in a format readable by UnmarshalBinary. +func MarshalBinary(v cryptobyte.MarshalingValue) ([]byte, error) { + const DefaultSize = 32 + b := cryptobyte.NewBuilder(make([]byte, 0, DefaultSize)) + b.AddValue(v) + return b.Bytes() +} + +// MarshalBinaryLen encodes a value into an array of n bytes in a format readable by UnmarshalBinary. +func MarshalBinaryLen(v cryptobyte.MarshalingValue, length uint) ([]byte, error) { + b := cryptobyte.NewFixedBuilder(make([]byte, 0, length)) + b.AddValue(v) + return b.Bytes() +} + +// A UnmarshalingValue decodes itself from a cryptobyte.String and advances the pointer. +// It reports whether the read was successful. +type UnmarshalingValue interface { + Unmarshal(*cryptobyte.String) bool +} + +// UnmarshalBinary recovers a value from a byte array. +// It returns an error if the read was unsuccessful. 
+func UnmarshalBinary(v UnmarshalingValue, data []byte) (err error) { + s := cryptobyte.String(data) + if data == nil || !v.Unmarshal(&s) || !s.Empty() { + err = fmt.Errorf("cannot read %T from input string", v) + } + return +} diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.s b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.s index 5c4aeddecb..1fcc2dee17 100644 --- a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.s +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.s @@ -1,4 +1,5 @@ -// +build amd64 +//go:build amd64 && !purego +// +build amd64,!purego #include "textflag.h" #include "fp_amd64.h" diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s index 435addf5e6..3f1f07c986 100644 --- a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s @@ -1,4 +1,5 @@ -// +build amd64 +//go:build amd64 && !purego +// +build amd64,!purego #include "textflag.h" #include "fp_amd64.h" diff --git a/vendor/github.com/cloudflare/circl/math/integer.go b/vendor/github.com/cloudflare/circl/math/integer.go new file mode 100644 index 0000000000..9c80c23b59 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/integer.go @@ -0,0 +1,16 @@ +package math + +import "math/bits" + +// NextPow2 finds the next power of two (N=2^k, k>=0) greater than n. +// If n is already a power of two, then this function returns n, and log2(n). +func NextPow2(n uint) (N uint, k uint) { + if bits.OnesCount(n) == 1 { + k = uint(bits.TrailingZeros(n)) + N = n + } else { + k = uint(bits.Len(n)) + N = uint(1) << k + } + return +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go index 374a69503c..d1c3b146b7 100644 --- a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go @@ -164,7 +164,7 @@ func (P *pointR1) isEqual(Q *pointR1) bool { fp.Mul(r, r, &P.z) fp.Sub(l, l, r) b = b && fp.IsZero(l) - return b + return b && !fp.IsZero(&P.z) && !fp.IsZero(&Q.z) } func (P *pointR3) neg() { diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go index 324bd8f334..c368b181b4 100644 --- a/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go +++ b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go @@ -206,7 +206,7 @@ func newKeyFromSeed(privateKey, seed []byte) { func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) { if len(ctx) > ContextMaxSize { - panic(fmt.Errorf("ed448: bad context length: " + strconv.Itoa(len(ctx)))) + panic(fmt.Errorf("ed448: bad context length: %v", len(ctx))) } H := sha3.NewShake256() diff --git a/vendor/github.com/cloudflare/circl/sign/sign.go b/vendor/github.com/cloudflare/circl/sign/sign.go index 13b20fa4b0..557d6f0960 100644 --- a/vendor/github.com/cloudflare/circl/sign/sign.go +++ b/vendor/github.com/cloudflare/circl/sign/sign.go @@ -107,4 +107,7 @@ var ( // ErrContextNotSupported is the error used if a context is not // supported. ErrContextNotSupported = errors.New("context not supported") + + // ErrContextTooLong is the error used if the context string is too long. 
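A quick sketch of the NextPow2 contract from the new math/integer.go above; the expected values follow directly from the bits.OnesCount/TrailingZeros/Len logic.

package main

import (
	"fmt"

	"github.com/cloudflare/circl/math"
)

func main() {
	// A power of two is returned unchanged, together with its exponent.
	N, k := math.NextPow2(8)
	fmt.Println(N, k) // 8 3

	// Anything else rounds up to the next power of two.
	N, k = math.NextPow2(9)
	fmt.Println(N, k) // 16 4
}
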
+ ErrContextTooLong = errors.New("context string too long") ) diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 5f93eeb4e8..88032defe7 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -2,7 +2,9 @@ # This file lists all contributors to the repository. # See hack/generate-authors.sh to make modifications. +7sunarni <710720732@qq.com> Aanand Prasad +Aarni Koskela Aaron Davidson Aaron Feng Aaron Hnatiw @@ -11,6 +13,7 @@ Aaron L. Xu Aaron Lehmann Aaron Welch Aaron Yoshitake +Abdur Rehman Abel Muiño Abhijeet Kasurde Abhinandan Prativadi @@ -24,9 +27,11 @@ Adam Avilla Adam Dobrawy Adam Eijdenberg Adam Kunk +Adam Lamers Adam Miller Adam Mills Adam Pointer +Adam Simon Adam Singer Adam Thornton Adam Walz @@ -119,6 +124,7 @@ amangoel Amen Belayneh Ameya Gawde Amir Goldstein +AmirBuddy Amit Bakshi Amit Krishnan Amit Shukla @@ -168,6 +174,7 @@ Andrey Kolomentsev Andrey Petrov Andrey Stolbovsky André Martins +Andrés Maldonado Andy Chambers andy diller Andy Goldstein @@ -219,6 +226,7 @@ Artur Meyster Arun Gupta Asad Saeeduddin Asbjørn Enge +Ashly Mathew Austin Vazquez averagehuman Avi Das @@ -345,6 +353,7 @@ Chance Zibolski Chander Govindarajan Chanhun Jeong Chao Wang +Charity Kathure Charles Chan Charles Hooper Charles Law @@ -480,6 +489,7 @@ Daniel Farrell Daniel Garcia Daniel Gasienica Daniel Grunwell +Daniel Guns Daniel Helfand Daniel Hiltgen Daniel J Walsh @@ -763,6 +773,7 @@ Frank Macreery Frank Rosquin Frank Villaro-Dixon Frank Yang +François Scala Fred Lifton Frederick F. Kautz IV Frederico F. de Oliveira @@ -798,6 +809,7 @@ GennadySpb Geoff Levand Geoffrey Bachelet Geon Kim +George Adams George Kontridze George Ma George MacRorie @@ -826,6 +838,7 @@ Gopikannan Venugopalsamy Gosuke Miyashita Gou Rao Govinda Fichtner +Grace Choi Grant Millar Grant Reaber Graydon Hoare @@ -966,6 +979,7 @@ James Nugent James Sanders James Turnbull James Watkins-Harvey +Jameson Hyde Jamie Hannaford Jamshid Afshar Jan Breig @@ -1064,13 +1078,16 @@ Jim Perrin Jimmy Cuadra Jimmy Puckett Jimmy Song +jinjiadu Jinsoo Park Jintao Zhang Jiri Appl Jiri Popelka Jiuyue Ma Jiří Župka +jjimbo137 <115816493+jjimbo137@users.noreply.github.com> Joakim Roubert +Joan Grau Joao Fernandes Joao Trindade Joe Beda @@ -1155,6 +1172,7 @@ Josiah Kiehl José Tomás Albornoz Joyce Jang JP +JSchltggr Julian Taylor Julien Barbier Julien Bisconti @@ -1289,6 +1307,7 @@ Laura Brehm Laura Frank Laurent Bernaille Laurent Erignoux +Laurent Goderre Laurie Voss Leandro Motta Barros Leandro Siqueira @@ -1369,6 +1388,7 @@ Madhan Raj Mookkandy Madhav Puri Madhu Venugopal Mageee +maggie44 <64841595+maggie44@users.noreply.github.com> Mahesh Tiyyagura malnick Malte Janduda @@ -1579,6 +1599,7 @@ Muayyad Alsadi Muhammad Zohaib Aslam Mustafa Akın Muthukumar R +Myeongjoon Kim Máximo Cuadros Médi-Rémi Hashim Nace Oroz @@ -1593,6 +1614,7 @@ Natasha Jarus Nate Brennand Nate Eagleson Nate Jones +Nathan Baulch Nathan Carlson Nathan Herald Nathan Hsieh @@ -1655,6 +1677,7 @@ Nuutti Kotivuori nzwsch O.S. 
Tezer objectified +Octol1ttle Odin Ugedal Oguz Bilgic Oh Jinkyun @@ -1763,6 +1786,7 @@ Pierre Carrier Pierre Dal-Pra Pierre Wacrenier Pierre-Alain RIVIERE +pinglanlu Piotr Bogdan Piotr Karbowski Porjo @@ -1790,6 +1814,7 @@ Quentin Tayssier r0n22 Rachit Sharma Radostin Stoyanov +Rafael Fernández López Rafal Jeczalik Rafe Colton Raghavendra K T @@ -1856,7 +1881,7 @@ Robin Speekenbrink Robin Thoni robpc Rodolfo Carvalho -Rodrigo Campos +Rodrigo Campos Rodrigo Vaz Roel Van Nyen Roger Peppe @@ -1995,6 +2020,7 @@ Sevki Hasirci Shane Canon Shane da Silva Shaun Kaasten +Shaun Thompson shaunol Shawn Landden Shawn Siefkas @@ -2013,6 +2039,7 @@ Shijun Qin Shishir Mahajan Shoubhik Bose Shourya Sarcar +Shreenidhi Shedi Shu-Wai Chow shuai-z Shukui Yang @@ -2100,6 +2127,7 @@ Sébastien Stormacq Sören Tempel Tabakhase Tadej Janež +Tadeusz Dudkiewicz Takuto Sato tang0th Tangi Colin @@ -2107,6 +2135,7 @@ Tatsuki Sugiura Tatsushi Inagaki Taylan Isikdemir Taylor Jones +tcpdumppy <847462026@qq.com> Ted M. Young Tehmasp Chaudhri Tejaswini Duggaraju @@ -2391,6 +2420,7 @@ You-Sheng Yang (楊有勝) youcai Youcef YEKHLEF Youfu Zhang +YR Chen Yu Changchun Yu Chengxia Yu Peng diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index 93d64cd8d5..2c62cd4032 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of the current REST API. - DefaultVersion = "1.47" + DefaultVersion = "1.48" // MinSupportedAPIVersion is the minimum API version that can be supported // by the API server, specified as "major.minor". Note that the daemon diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 7164e1eba5..646032d6e0 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.47" +basePath: "/v1.48" info: title: "Docker Engine API" - version: "1.47" + version: "1.48" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,14 +55,14 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.47) is used. - For example, calling `/info` is the same as calling `/v1.47/info`. Using the + If you omit the version-prefix, the current version of the API (v1.48) is used. + For example, calling `/info` is the same as calling `/v1.48/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. - The API uses an open schema model, which means server may add extra properties + The API uses an open schema model, which means the server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer @@ -212,6 +212,7 @@ definitions: - `bind` a mount of a file or directory from the host into the container. - `volume` a docker volume with the given `Name`. 
+ - `image` a docker image - `tmpfs` a `tmpfs`. - `npipe` a named pipe from the host into the container. - `cluster` a Swarm cluster volume @@ -219,6 +220,7 @@ definitions: enum: - "bind" - "volume" + - "image" - "tmpfs" - "npipe" - "cluster" @@ -350,6 +352,7 @@ definitions: - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `image` Mounts an image. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. - `cluster` a Swarm cluster volume @@ -357,6 +360,7 @@ definitions: enum: - "bind" - "volume" + - "image" - "tmpfs" - "npipe" - "cluster" @@ -431,6 +435,14 @@ definitions: description: "Source path inside the volume. Must be relative without any back traversals." type: "string" example: "dir-inside-volume/subdirectory" + ImageOptions: + description: "Optional configuration for the `image` type." + type: "object" + properties: + Subpath: + description: "Source path inside the image. Must be relative without any back traversals." + type: "string" + example: "dir-inside-image/subdirectory" TmpfsOptions: description: "Optional configuration for the `tmpfs` type." type: "object" @@ -953,13 +965,18 @@ definitions: ContainerIDFile: type: "string" description: "Path to a file where the container ID is written" + example: "" LogConfig: type: "object" description: "The logging configuration for this container" properties: Type: + description: |- + Name of the logging driver used for the container or "none" + if logging is disabled. type: "string" enum: + - "local" - "json-file" - "syslog" - "journald" @@ -970,9 +987,14 @@ definitions: - "etwlogs" - "none" Config: + description: |- + Driver-specific configuration options for the logging driver. type: "object" additionalProperties: type: "string" + example: + "max-file": "5" + "max-size": "10m" NetworkMode: type: "string" description: | @@ -1015,6 +1037,7 @@ definitions: items: type: "integer" minimum: 0 + example: [80, 64] Annotations: type: "object" description: | @@ -1117,7 +1140,8 @@ definitions: - `"host"`: use the host's PID namespace inside the container Privileged: type: "boolean" - description: "Gives the container full access to the host." + description: |- + Gives the container full access to the host. PublishAllPorts: type: "boolean" description: | @@ -1174,18 +1198,20 @@ definitions: minimum: 0 Sysctls: type: "object" - description: | + x-nullable: true + description: |- A list of kernel parameters (sysctls) to set in the container. - For example: - ``` - {"net.ipv4.ip_forward": "1"} - ``` + This field is omitted if not set. additionalProperties: type: "string" + example: + "net.ipv4.ip_forward": "1" Runtime: type: "string" - description: "Runtime to use with this container." + x-nullable: true + description: |- + Runtime to use with this container. # Applicable to Windows Isolation: type: "string" @@ -1195,6 +1221,7 @@ definitions: - "default" - "process" - "hyperv" + - "" MaskedPaths: type: "array" description: | @@ -1202,6 +1229,18 @@ definitions: the default set of paths). 
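On the client side, the new `image` mount type documented above is expected to surface through the Go client's mount package; a sketch, assuming mount.TypeImage and the corresponding wiring exist in the client release matching this API version (the source image and container name are placeholders).

package main

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Mount another image's filesystem into the container, per the new
	// `image` mount type (optionally scoped via ImageOptions.Subpath).
	_, err = cli.ContainerCreate(context.Background(),
		&container.Config{Image: "alpine:latest", Cmd: []string{"ls", "/data"}},
		&container.HostConfig{
			Mounts: []mount.Mount{{
				Type:   mount.TypeImage,                       // assumed to map to "image"
				Source: "registry.example.com/content:latest", // hypothetical source image
				Target: "/data",
			}},
		},
		nil, nil, "image-mount-demo")
	if err != nil {
		panic(err)
	}
}
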
items: type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" ReadonlyPaths: type: "array" description: | @@ -1209,6 +1248,12 @@ definitions: (this overrides the default set of paths). items: type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" ContainerConfig: description: | @@ -1225,8 +1270,14 @@ definitions: The domain name to use for the container. type: "string" User: - description: "The user that commands are run as inside the container." + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`[<:group-name|GID>]`). type: "string" + example: "123:456" AttachStdin: description: "Whether to attach to `stdin`." type: "boolean" @@ -1917,7 +1968,7 @@ definitions: type: "string" example: "4443" - GraphDriverData: + DriverData: description: | Information about the storage driver used to store the container's and image's filesystem. @@ -1991,6 +2042,33 @@ definitions: type: "string" x-nullable: false example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + Manifests: + description: | + Manifests is a list of image manifests available in this image. It + provides a more detailed view of the platform-specific image manifests or + other image-attached data like build attestations. + + Only available if the daemon provides a multi-platform image store + and the `manifests` option is set in the inspect request. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: true + items: + $ref: "#/definitions/ImageManifestSummary" RepoTags: description: | List of image names/tags in the local image cache that reference this @@ -2107,7 +2185,7 @@ definitions: format: "int64" example: 1239828 GraphDriver: - $ref: "#/definitions/GraphDriverData" + $ref: "#/definitions/DriverData" RootFS: description: | Information about the image's RootFS, including the layer IDs. @@ -2278,6 +2356,18 @@ definitions: x-omitempty: true items: $ref: "#/definitions/ImageManifestSummary" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" AuthConfig: type: "object" @@ -2497,6 +2587,11 @@ definitions: `overlay`). type: "string" example: "overlay" + EnableIPv4: + description: | + Whether the network was created with IPv4 enabled. 
+ type: "boolean" + example: true EnableIPv6: description: | Whether the network was created with IPv6 enabled. @@ -2697,12 +2792,24 @@ definitions: type: "string" error: type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. progressDetail: $ref: "#/definitions/ProgressDetail" aux: @@ -2802,12 +2909,24 @@ definitions: type: "string" error: type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. progressDetail: $ref: "#/definitions/ProgressDetail" @@ -2816,10 +2935,24 @@ definitions: properties: error: type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. progressDetail: $ref: "#/definitions/ProgressDetail" @@ -2851,9 +2984,10 @@ definitions: example: message: "Something went wrong." - IdResponse: + IDResponse: description: "Response to an API call that returns just an Id" type: "object" + x-go-name: "IDResponse" required: ["Id"] properties: Id: @@ -2898,6 +3032,16 @@ definitions: example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" + GwPriority: + description: | + This property determines which endpoint will provide the default + gateway for a container. The endpoint with the highest priority will + be used. If multiple endpoints have the same priority, endpoints are + lexicographically sorted based on their network name, and the one + that sorts first is picked. 
+ type: "number"
+ example:
+ - 10
# Operational data
NetworkID:
@@ -4180,6 +4324,7 @@
- "default"
- "process"
- "hyperv"
+ - ""
Init:
description: |
Run an init inside the container that forwards signals and reaps
@@ -4984,76 +5129,346 @@
Warnings:
- "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+ ContainerInspectResponse:
+ type: "object"
+ title: "ContainerInspectResponse"
+ x-go-name: "InspectResponse"
+ properties:
+ Id:
+ description: |-
+ The ID of this container as a 256-bit (64-character) hexadecimal string (32 bytes).
+ type: "string"
+ x-go-name: "ID"
+ minLength: 64
+ maxLength: 64
+ pattern: "^[0-9a-fA-F]{64}$"
+ example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf"
+ Created:
+ description: |-
+ Date and time at which the container was created, formatted in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ x-nullable: true
+ example: "2025-02-17T17:43:39.64001363Z"
+ Path:
+ description: |-
+ The path to the command being run
+ type: "string"
+ example: "/bin/sh"
+ Args:
+ description: "The arguments to the command being run"
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "-c"
+ - "exit 9"
+ State:
+ $ref: "#/definitions/ContainerState"
+ Image:
+ description: |-
+ The ID (digest) of the image that this container was created from.
+ type: "string"
+ example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782"
+ ResolvConfPath:
+ description: |-
+ Location of the `/etc/resolv.conf` generated for the container on the
+ host.
+
+ This file is managed through the docker daemon, and should not be
+ accessed or modified by other tools.
+ type: "string"
+ example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf"
+ HostnamePath:
+ description: |-
+ Location of the `/etc/hostname` generated for the container on the
+ host.
+
+ This file is managed through the docker daemon, and should not be
+ accessed or modified by other tools.
+ type: "string"
+ example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname"
+ HostsPath:
+ description: |-
+ Location of the `/etc/hosts` generated for the container on the
+ host.
+
+ This file is managed through the docker daemon, and should not be
+ accessed or modified by other tools.
+ type: "string"
+ example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts"
+ LogPath:
+ description: |-
+ Location of the file used to buffer the container's logs. Depending on
+ the logging-driver used for the container, this field may be omitted.
+
+ This file is managed through the docker daemon, and should not be
+ accessed or modified by other tools.
+ type: "string"
+ x-nullable: true
+ example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log"
+ Name:
+ description: |-
+ The name associated with this container.
+
+ For historic reasons, the name may be prefixed with a forward-slash (`/`).
+ type: "string"
+ example: "/funny_chatelet"
+ RestartCount:
+ description: |-
+ Number of times the container was restarted since it was created,
+ or since the daemon was started.
+ type: "integer"
+ example: 0
+ Driver:
+ description: |-
+ The storage-driver used for the container's filesystem (graph-driver
+ or snapshotter).
+ type: "string"
+ example: "overlayfs"
+ Platform:
+ description: |-
+ The platform (operating system) for which the container was created.
+
+ This field was introduced for the experimental "LCOW" (Linux Containers
+ On Windows) feature, which has been removed. In most cases, this field
+ is equal to the host's operating system (`linux` or `windows`).
+ type: "string"
+ example: "linux"
+ ImageManifestDescriptor:
+ $ref: "#/definitions/OCIDescriptor"
+ description: |-
+ OCI descriptor of the platform-specific manifest of the image
+ the container was created from.
+
+ Note: Only available if the daemon provides a multi-platform
+ image store.
+ MountLabel:
+ description: |-
+ SELinux mount label set for the container.
+ type: "string"
+ example: ""
+ ProcessLabel:
+ description: |-
+ SELinux process label set for the container.
+ type: "string"
+ example: ""
+ AppArmorProfile:
+ description: |-
+ The AppArmor profile set for the container.
+ type: "string"
+ example: ""
+ ExecIDs:
+ description: |-
+ IDs of exec instances that are running in the container.
+ type: "array"
+ items:
+ type: "string"
+ x-nullable: true
+ example:
+ - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca"
+ - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4"
+ HostConfig:
+ $ref: "#/definitions/HostConfig"
+ GraphDriver:
+ $ref: "#/definitions/DriverData"
+ SizeRw:
+ description: |-
+ The size of files that have been created or changed by this container.
+
+ This field is omitted by default, and only set when size is requested
+ in the API request.
+ type: "integer"
+ format: "int64"
+ x-nullable: true
+ example: "122880"
+ SizeRootFs:
+ description: |-
+ The total size of all files in the read-only layers from the image
+ that the container uses. These layers can be shared between containers.
+
+ This field is omitted by default, and only set when size is requested
+ in the API request.
+ type: "integer"
+ format: "int64"
+ x-nullable: true
+ example: "1653948416"
+ Mounts:
+ description: |-
+ List of mounts used by the container.
+ type: "array"
+ items:
+ $ref: "#/definitions/MountPoint"
+ Config:
+ $ref: "#/definitions/ContainerConfig"
+ NetworkSettings:
+ $ref: "#/definitions/NetworkSettings"
+
ContainerSummary:
type: "object"
properties:
Id:
- description: "The ID of this container"
+ description: |-
+ The ID of this container as a 256-bit (64-character) hexadecimal string (32 bytes).
type: "string"
x-go-name: "ID"
+ minLength: 64
+ maxLength: 64
+ pattern: "^[0-9a-fA-F]{64}$"
+ example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf"
Names:
- description: "The names that this container has been given"
+ description: |-
+ The names associated with this container. Most containers have a single
+ name, but when using legacy "links", the container can have multiple
+ names.
+
+ For historic reasons, names are prefixed with a forward-slash (`/`).
type: "array"
items:
type: "string"
+ example:
+ - "/funny_chatelet"
Image:
- description: "The name of the image used when creating this container"
+ description: |-
+ The name or ID of the image used to create the container.
+
+ This field shows the image reference as it was specified when creating the container,
+ which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest`
+ or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`),
+ short form (e.g., `ubuntu:latest`), or the ID(-prefix) of the image (e.g., `72297848456d`).
+
+ The content of this field can be updated at runtime if the image used to
+ create the container is untagged, in which case the field is updated to
+ contain the image ID (digest) it was resolved to in its canonical,
+ non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`).
type: "string"
+ example: "docker.io/library/ubuntu:latest"
ImageID:
- description: "The ID of the image that this container was created from"
+ description: |-
+ The ID (digest) of the image that this container was created from.
type: "string"
+ example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782"
+ ImageManifestDescriptor:
+ $ref: "#/definitions/OCIDescriptor"
+ x-nullable: true
+ description: |
+ OCI descriptor of the platform-specific manifest of the image
+ the container was created from.
+
+ Note: Only available if the daemon provides a multi-platform
+ image store.
+
+ This field is not populated in the `GET /system/df` endpoint.
Command:
description: "Command to run when starting the container"
type: "string"
+ example: "/bin/bash"
Created:
- description: "When the container was created"
+ description: |-
+ Date and time at which the container was created as a Unix timestamp
+ (number of seconds since EPOCH).
type: "integer"
format: "int64"
+ example: "1739811096"
Ports:
- description: "The ports exposed by this container"
+ description: |-
+ Port-mappings for the container.
type: "array"
items:
$ref: "#/definitions/Port"
SizeRw:
- description: "The size of files that have been created or changed by this container"
+ description: |-
+ The size of files that have been created or changed by this container.
+
+ This field is omitted by default, and only set when size is requested
+ in the API request.
type: "integer"
format: "int64"
+ x-nullable: true
+ example: "122880"
SizeRootFs:
- description: "The total size of all the files in this container"
+ description: |-
+ The total size of all files in the read-only layers from the image
+ that the container uses. These layers can be shared between containers.
+
+ This field is omitted by default, and only set when size is requested
+ in the API request.
type: "integer"
format: "int64"
+ x-nullable: true
+ example: "1653948416"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
+ example:
+ com.example.vendor: "Acme"
+ com.example.license: "GPL"
+ com.example.version: "1.0"
State:
- description: "The state of this container (e.g. `Exited`)"
+ description: |
+ The state of this container.
type: "string"
+ enum:
+ - "created"
+ - "running"
+ - "paused"
+ - "restarting"
+ - "exited"
+ - "removing"
+ - "dead"
+ example: "running"
Status:
- description: "Additional human-readable status of this container (e.g. `Exit 0`)"
+ description: |-
+ Additional human-readable status of this container (e.g. `Exit 0`)
type: "string"
+ example: "Up 4 days"
HostConfig:
type: "object"
+ description: |-
+ Summary of host-specific runtime information of the container. This
+ is a reduced set of information in the container's "HostConfig" as
+ available in the container "inspect" response.
properties:
NetworkMode:
+ description: |-
+ Networking mode (`host`, `none`, `container:<name|id>`) or name of the
+ primary network the container is using.
+
+ This field is primarily for backward compatibility. The container
+ can be connected to multiple networks for which information can be
+ found in the `NetworkSettings.Networks` field, which enumerates
+ settings per network.
type: "string"
+ example: "mynetwork"
Annotations:
- description: "Arbitrary key-value metadata attached to container"
+ description: |-
+ Arbitrary key-value metadata attached to the container.
type: "object"
x-nullable: true
additionalProperties:
type: "string"
+ example:
+ io.kubernetes.docker.type: "container"
+ io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3"
NetworkSettings:
- description: "A summary of the container's network settings"
+ description: |-
+ Summary of the container's network settings
type: "object"
properties:
Networks:
type: "object"
+ description: |-
+ Summary of network-settings for each network the container is
+ attached to.
additionalProperties:
$ref: "#/definitions/EndpointSettings"
Mounts:
type: "array"
+ description: |-
+ List of mounts used by the container.
items:
$ref: "#/definitions/MountPoint"
@@ -5093,8 +5508,11 @@
com.example.some-other-label: "some-other-value"
Data:
description: |
- Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))
- data to store as secret.
+ Data is the data to store as a secret, formatted as a Base64-url-safe-encoded
+ ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string.
+ It must be empty if the Driver field is set, in which case the data is
+ loaded from an external secret store. The maximum allowed size is 500KB,
+ as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize).
This field is only used to _create_ a secret, and is not returned by
other endpoints.
@@ -5145,8 +5563,9 @@
type: "string"
Data:
description: |
- Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))
- config data.
+ Data is the data to store as a config, formatted as a Base64-url-safe-encoded
+ ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string.
+ The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize).
type: "string"
Templating:
description: |
@@ -5258,48 +5677,660 @@
type: "string"
example: []
- ContainerWaitResponse:
- description: "OK response to ContainerWait operation"
+ ContainerUpdateResponse:
type: "object"
- x-go-name: "WaitResponse"
- title: "ContainerWaitResponse"
- required: [StatusCode]
+ title: "ContainerUpdateResponse"
+ x-go-name: "UpdateResponse"
+ description: |-
+ Response for a successful container-update.
properties:
- StatusCode:
- description: "Exit code of the container"
- type: "integer"
- format: "int64"
- x-nullable: false
- Error:
- $ref: "#/definitions/ContainerWaitExitError"
+ Warnings:
+ type: "array"
+ description: |-
+ Warnings encountered when updating the container.
+ items:
+ type: "string"
+ example: ["Published ports are discarded when using host network mode"]
- ContainerWaitExitError:
- description: "container waiting error, if any"
+ ContainerStatsResponse:
+ description: |
+ Statistics sample for a container.
type: "object"
- x-go-name: "WaitExitError"
+ x-go-name: "StatsResponse"
+ title: "ContainerStatsResponse"
properties:
- Message:
- description: "Details of an error"
+ name:
+ description: "Name of the container"
+ type: "string"
+ x-nullable: true
+ example: "boring_wozniak"
+ id:
+ description: "ID of the container"
+ type: "string"
+ x-nullable: true
+ example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
+ read:
+ description: |
+ Date and time at which this sample was collected.
+ The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
+ with nano-seconds.
type: "string"
+ format: "date-time"
+ example: "2025-01-16T13:55:22.165243637Z"
+ preread:
+ description: |
+ Date and time at which this first sample was collected. If the
+ "one-shot" option is set, this field is not propagated and may be
+ omitted, empty, or set to a default date
+ (`0001-01-01T00:00:00Z`).
- SystemVersion:
- type: "object"
+ The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
+ with nano-seconds.
+ type: "string"
+ format: "date-time"
+ example: "2025-01-16T13:55:21.160452595Z"
+ pids_stats:
+ $ref: "#/definitions/ContainerPidsStats"
+ blkio_stats:
+ $ref: "#/definitions/ContainerBlkioStats"
+ num_procs:
+ description: |
+ The number of processors on the system.
+
+ This field is Windows-specific and always zero for Linux containers.
+ type: "integer"
+ format: "uint32"
+ example: 16
+ storage_stats:
+ $ref: "#/definitions/ContainerStorageStats"
+ cpu_stats:
+ $ref: "#/definitions/ContainerCPUStats"
+ precpu_stats:
+ $ref: "#/definitions/ContainerCPUStats"
+ memory_stats:
+ $ref: "#/definitions/ContainerMemoryStats"
+ networks:
+ description: |
+ Network statistics for the container per interface.
+
+ This field is omitted if the container has no networking enabled.
+ x-nullable: true
+ additionalProperties:
+ $ref: "#/definitions/ContainerNetworkStats"
+ example:
+ eth0:
+ rx_bytes: 5338
+ rx_dropped: 0
+ rx_errors: 0
+ rx_packets: 36
+ tx_bytes: 648
+ tx_dropped: 0
+ tx_errors: 0
+ tx_packets: 8
+ eth5:
+ rx_bytes: 4641
+ rx_dropped: 0
+ rx_errors: 0
+ rx_packets: 26
+ tx_bytes: 690
+ tx_dropped: 0
+ tx_errors: 0
+ tx_packets: 9
+
+ ContainerBlkioStats:
description: |
- Response of Engine API: GET "/version"
+ BlkioStats stores all IO service stats for data read and write.
+
+ This type is Linux-specific and holds many fields that are specific to cgroups v1.
+ On a cgroup v2 host, all fields other than `io_service_bytes_recursive`
+ are omitted or `null`.
+
+ This type is only populated on Linux and omitted for Windows containers.
+ type: "object"
+ x-go-name: "BlkioStats"
+ x-nullable: true
properties:
- Platform:
- type: "object"
- required: [Name]
- properties:
- Name:
- type: "string"
- Components:
+ io_service_bytes_recursive:
type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ io_serviced_recursive:
description: |
- Information about system components
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
items:
- type: "object"
- x-go-name: ComponentVersion
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ io_queue_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ io_service_time_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ io_wait_time_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ io_merged_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ io_time_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ sectors_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ example:
+ io_service_bytes_recursive: [
+ {"major": 254, "minor": 0, "op": "read", "value": 7593984},
+ {"major": 254, "minor": 0, "op": "write", "value": 100}
+ ]
+ io_serviced_recursive: null
+ io_queue_recursive: null
+ io_service_time_recursive: null
+ io_wait_time_recursive: null
+ io_merged_recursive: null
+ io_time_recursive: null
+ sectors_recursive: null
+
+ ContainerBlkioStatEntry:
+ description: |
+ Blkio stats entry.
+
+ This type is Linux-specific and omitted for Windows containers.
+ type: "object"
+ x-go-name: "BlkioStatEntry"
+ x-nullable: true
+ properties:
+ major:
+ type: "integer"
+ format: "uint64"
+ example: 254
+ minor:
+ type: "integer"
+ format: "uint64"
+ example: 0
+ op:
+ type: "string"
+ example: "read"
+ value:
+ type: "integer"
+ format: "uint64"
+ example: 7593984
+
+ ContainerCPUStats:
+ description: |
+ CPU related info of the container
+ type: "object"
+ x-go-name: "CPUStats"
+ x-nullable: true
+ properties:
+ cpu_usage:
+ $ref: "#/definitions/ContainerCPUUsage"
+ system_cpu_usage:
+ description: |
+ System Usage.
+
+ This field is Linux-specific and omitted for Windows containers.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 5
+ online_cpus:
+ description: |
+ Number of online CPUs.
+
+ This field is Linux-specific and omitted for Windows containers.
+ type: "integer"
+ format: "uint32"
+ x-nullable: true
+ example: 5
+ throttling_data:
+ $ref: "#/definitions/ContainerThrottlingData"
+
+ ContainerCPUUsage:
+ description: |
+ All CPU stats aggregated since container inception.
+ type: "object"
+ x-go-name: "CPUUsage"
+ x-nullable: true
+ properties:
+ total_usage:
+ description: |
+ Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows).
+ type: "integer"
+ format: "uint64"
+ example: 29912000
+ percpu_usage:
+ description: |
+ Total CPU time (in nanoseconds) consumed per core (Linux).
+
+ This field is Linux-specific when using cgroups v1. It is omitted
+ when using cgroups v2 and for Windows containers.
+ type: "array"
+ x-nullable: true
+ items:
+ type: "integer"
+ format: "uint64"
+ example: 29912000
+
+ usage_in_kernelmode:
+ description: |
+ Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux),
+ or time spent (in 100's of nanoseconds) by all container processes in
+ kernel mode (Windows).
+
+ Not populated for Windows containers using Hyper-V isolation.
+ type: "integer"
+ format: "uint64"
+ example: 21994000
+ usage_in_usermode:
+ description: |
+ Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux),
+ or time spent (in 100's of nanoseconds) by all container processes in
+ user mode (Windows).
+
+ Not populated for Windows containers using Hyper-V isolation.
+ type: "integer"
+ format: "uint64"
+ example: 7918000
+
+ ContainerPidsStats:
+ description: |
+ PidsStats contains Linux-specific stats of a container's process-IDs (PIDs).
+
+ This type is Linux-specific and omitted for Windows containers.
+ type: "object"
+ x-go-name: "PidsStats"
+ x-nullable: true
+ properties:
+ current:
+ description: |
+ Current is the number of PIDs in the cgroup.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 5
+ limit:
+ description: |
+ Limit is the hard limit on the number of pids in the cgroup.
+ A "Limit" of 0 means that there is no limit.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 18446744073709551615
+
+ ContainerThrottlingData:
+ description: |
+ CPU throttling stats of the container.
+
+ This type is Linux-specific and omitted for Windows containers.
+ type: "object"
+ x-go-name: "ThrottlingData"
+ x-nullable: true
+ properties:
+ periods:
+ description: |
+ Number of periods with throttling active.
+ type: "integer"
+ format: "uint64"
+ example: 0
+ throttled_periods:
+ description: |
+ Number of periods when the container hit its throttling limit.
+ type: "integer"
+ format: "uint64"
+ example: 0
+ throttled_time:
+ description: |
+ Aggregated time (in nanoseconds) the container was throttled for.
+ type: "integer"
+ format: "uint64"
+ example: 0
+
+ ContainerMemoryStats:
+ description: |
+ Aggregates all memory stats since container inception on Linux.
+ Windows returns stats for commit and private working set only.
+ type: "object"
+ x-go-name: "MemoryStats"
+ properties:
+ usage:
+ description: |
+ Current `res_counter` usage for memory.
+
+ This field is Linux-specific and omitted for Windows containers.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 0
+ max_usage:
+ description: |
+ Maximum usage ever recorded.
+
+ This field is Linux-specific and only supported on cgroups v1.
+ It is omitted when using cgroups v2 and for Windows containers.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 0
+ stats:
+ description: |
+ All the stats exported via `memory.stat` when using cgroups v2.
+
+ This field is Linux-specific and omitted for Windows containers.
+ type: "object" + additionalProperties: + type: "integer" + format: "uint64" + x-nullable: true + example: + { + "active_anon": 1572864, + "active_file": 5115904, + "anon": 1572864, + "anon_thp": 0, + "file": 7626752, + "file_dirty": 0, + "file_mapped": 2723840, + "file_writeback": 0, + "inactive_anon": 0, + "inactive_file": 2510848, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 2042, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 45, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 1180928, + "slab_reclaimable": 725576, + "slab_unreclaimable": 455352, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 1, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + } + failcnt: + description: | + Number of times memory usage hits limits. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + limit: + description: | + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 8217579520 + commitbytes: + description: | + Committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + commitpeakbytes: + description: | + Peak committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + privateworkingset: + description: | + Private working set. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + + ContainerNetworkStats: + description: | + Aggregates the network stats of one container + type: "object" + x-go-name: "NetworkStats" + x-nullable: true + properties: + rx_bytes: + description: | + Bytes received. Windows and Linux. + type: "integer" + format: "uint64" + example: 5338 + rx_packets: + description: | + Packets received. Windows and Linux. + type: "integer" + format: "uint64" + example: 36 + rx_errors: + description: | + Received errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + rx_dropped: + description: | + Incoming packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + tx_bytes: + description: | + Bytes sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 1200 + tx_packets: + description: | + Packets sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 12 + tx_errors: + description: | + Sent errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + tx_dropped: + description: | + Outgoing packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + endpoint_id: + description: | + Endpoint ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + instance_id: + description: | + Instance ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. 
+ type: "string" + x-nullable: true + + ContainerStorageStats: + description: | + StorageStats is the disk I/O stats for read/write on Windows. + + This type is Windows-specific and omitted for Linux containers. + type: "object" + x-go-name: "StorageStats" + x-nullable: true + properties: + read_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + read_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + + ContainerTopResponse: + type: "object" + x-go-name: "TopResponse" + title: "ContainerTopResponse" + description: |- + Container "top" response. + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + example: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + description: |- + Each process running in the container, where each process + is an array of values corresponding to the titles. + type: "array" + items: + type: "array" + items: + type: "string" + example: + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion required: [Name, Version] properties: Name: @@ -5507,13 +6538,28 @@ definitions: type: "boolean" example: true BridgeNfIptables: - description: "Indicates if `bridge-nf-call-iptables` is available on the host." + description: | + Indicates if `bridge-nf-call-iptables` is available on the host when + the daemon was started. + +


+
+ > **Deprecated**: netfilter module is now loaded on-demand and no longer
> during daemon startup, making this field obsolete. This field is always
> `false` and will be removed in API v1.49.
type: "boolean"
- example: true
+ example: false
BridgeNfIp6tables:
- description: "Indicates if `bridge-nf-call-ip6tables` is available on the host."
+ description: |
+ Indicates if `bridge-nf-call-ip6tables` is available on the host.
+
+


+
+ > **Deprecated**: netfilter module is now loaded on-demand, and no longer
> during daemon startup, making this field obsolete. This field is always
> `false` and will be removed in API v1.49.
type: "boolean"
- example: true
+ example: false
Debug:
description: |
Indicates if the daemon is running in debug-mode / with debug-level
@@ -5750,6 +6796,7 @@
- "default"
- "hyperv"
- "process"
+ - ""
InitBinary:
description: |
Name and, optionally, path of the `docker-init` binary.
@@ -5820,8 +6867,6 @@
type: "string"
example:
- "WARNING: No memory limit support"
- - "WARNING: bridge-nf-call-iptables is disabled"
- - "WARNING: bridge-nf-call-ip6tables is disabled"
CDISpecDirs:
description: |
List of directories where (Container Device Interface) CDI
@@ -5944,55 +6989,27 @@
List of IP ranges to which nondistributable artifacts can be
pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632).
- Some images (for example, Windows base images) contain artifacts
- whose distribution is restricted by license. When these images are
- pushed to a registry, restricted artifacts are not included.
-
- This configuration override this behavior, and enables the daemon to
- push nondistributable artifacts to all registries whose resolved IP
- address is within the subnet described by the CIDR syntax.
-
- This option is useful when pushing images containing
- nondistributable artifacts to a registry on an air-gapped network so
- hosts on that network can pull the images without connecting to
- another server.
-
- > **Warning**: Nondistributable artifacts typically have restrictions
- > on how and where they can be distributed and shared. Only use this
- > feature to push artifacts to private registries and ensure that you
- > are in compliance with any terms that cover redistributing
- > nondistributable artifacts.
+


+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled
+ and this field is always `null`. This field will be removed in API v1.49.
type: "array"
items:
type: "string"
- example: ["::1/128", "127.0.0.0/8"]
+ example: []
AllowNondistributableArtifactsHostnames:
description: |
List of registry hostnames to which nondistributable artifacts can
be pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
- Some images (for example, Windows base images) contain artifacts
- whose distribution is restricted by license. When these images are
- pushed to a registry, restricted artifacts are not included.
-
- This configuration override this behavior for the specified
- registries.
-
- This option is useful when pushing images containing
- nondistributable artifacts to a registry on an air-gapped network so
- hosts on that network can pull the images without connecting to
- another server.
+


- > **Warning**: Nondistributable artifacts typically have restrictions
- > on how and where they can be distributed and shared. Only use this
- > feature to push artifacts to private registries and ensure that you
- > are in compliance with any terms that cover redistributing
- > nondistributable artifacts.
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled
+ and this field is always `null`. This field will be removed in API v1.49.
type: "array"
items:
type: "string"
- example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"]
+ example: []
InsecureRegistryCIDRs:
description: |
List of IP ranges of insecure registries, using the CIDR syntax
@@ -6000,7 +7017,7 @@
accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
from unknown CAs) communication.
- By default, local registries (`127.0.0.0/8`) are configured as
+ By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as
insecure. All other registries are secure. Communicating with an
insecure registry is not possible if the daemon assumes that registry
is secure.
@@ -6165,6 +7182,8 @@
Expected:
description: |
Commit ID of external tool expected by dockerd as set at build time.
+
+ **Deprecated**: This field is deprecated and will be omitted in API v1.49.
type: "string"
example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4"
@@ -6335,7 +7354,7 @@
description: |
The media type of the object this schema refers to.
type: "string"
- example: "application/vnd.docker.distribution.manifest.v2+json"
+ example: "application/vnd.oci.image.manifest.v1+json"
digest:
description: |
The digest of the targeted content.
@@ -6346,27 +7365,52 @@
The size in bytes of the blob.
type: "integer"
format: "int64"
- example: 3987495
- # TODO Not yet including these fields for now, as they are nil / omitted in our response.
- # urls:
- # description: |
- # List of URLs from which this object MAY be downloaded.
- # type: "array"
- # items:
- # type: "string"
- # format: "uri"
- # annotations:
- # description: |
- # Arbitrary metadata relating to the targeted content.
- # type: "object"
- # additionalProperties:
- # type: "string"
- # platform:
- # $ref: "#/definitions/OCIPlatform"
+ example: 424
+ urls:
+ description: |-
+ List of URLs from which this object MAY be downloaded.
+ type: "array"
+ items:
+ type: "string"
+ format: "uri"
+ x-nullable: true
+ annotations:
+ description: |-
+ Arbitrary metadata relating to the targeted content.
+ type: "object"
+ x-nullable: true
+ additionalProperties:
+ type: "string"
+ example:
+ "com.docker.official-images.bashbrew.arch": "amd64"
+ "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8"
+ "org.opencontainers.image.base.name": "scratch"
+ "org.opencontainers.image.created": "2025-01-27T00:00:00Z"
+ "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79"
+ "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base"
+ "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu"
+ "org.opencontainers.image.version": "24.04"
+ data:
+ type: string
+ x-nullable: true
+ description: |-
+ Data is an embedding of the targeted content. This is encoded as a base64
+ string when marshalled to JSON (automatically, by encoding/json). If
+ present, Data can be used directly to avoid fetching the targeted content.
+ example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. + type: "string" + x-nullable: true + example: null OCIPlatform: type: "object" x-go-name: Platform + x-nullable: true description: | Describes the platform which the image in the manifest runs on, as defined in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). @@ -6836,143 +7880,6 @@ paths: type: "array" items: $ref: "#/definitions/ContainerSummary" - examples: - application/json: - - Id: "8dfafdbc3a40" - Names: - - "/boring_feynman" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 1" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: - - PrivatePort: 2222 - PublicPort: 3333 - Type: "tcp" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.docker.type: "container" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:02" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - - Id: "9cd87474be90" - Names: - - "/coolName" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 222222" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.docker.type: "container" - io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.8" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:08" - Mounts: [] - - Id: "3176a2479c92" - Names: - - "/sleepy_dog" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 3333333333333333" - Created: 1367854154 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.image.id: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - io.kubernetes.image.name: "ubuntu:latest" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.6" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:06" - Mounts: [] - - Id: "4cb07b47f9fb" - Names: - - "/running_cat" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 
444444444444444444444444444444444" - Created: 1367854152 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.config.source: "api" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.5" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:05" - Mounts: [] 400: description: "bad parameter" schema: @@ -7197,238 +8104,7 @@ paths: 200: description: "no error" schema: - type: "object" - title: "ContainerInspectResponse" - properties: - Id: - description: "The ID of the container" - type: "string" - Created: - description: "The time the container was created" - type: "string" - Path: - description: "The path to the command being run" - type: "string" - Args: - description: "The arguments to the command being run" - type: "array" - items: - type: "string" - State: - $ref: "#/definitions/ContainerState" - Image: - description: "The container's image ID" - type: "string" - ResolvConfPath: - type: "string" - HostnamePath: - type: "string" - HostsPath: - type: "string" - LogPath: - type: "string" - Name: - type: "string" - RestartCount: - type: "integer" - Driver: - type: "string" - Platform: - type: "string" - MountLabel: - type: "string" - ProcessLabel: - type: "string" - AppArmorProfile: - type: "string" - ExecIDs: - description: "IDs of exec instances that are running in the container." - type: "array" - items: - type: "string" - x-nullable: true - HostConfig: - $ref: "#/definitions/HostConfig" - GraphDriver: - $ref: "#/definitions/GraphDriverData" - SizeRw: - description: | - The size of files that have been created or changed by this - container. - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container." 
- type: "integer" - format: "int64" - Mounts: - type: "array" - items: - $ref: "#/definitions/MountPoint" - Config: - $ref: "#/definitions/ContainerConfig" - NetworkSettings: - $ref: "#/definitions/NetworkSettings" - examples: - application/json: - AppArmorProfile: "" - Args: - - "-c" - - "exit 9" - Config: - AttachStderr: true - AttachStdin: false - AttachStdout: true - Cmd: - - "/bin/sh" - - "-c" - - "exit 9" - Domainname: "" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Healthcheck: - Test: ["CMD-SHELL", "exit 0"] - Hostname: "ba033ac44011" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - MacAddress: "" - NetworkDisabled: false - OpenStdin: false - StdinOnce: false - Tty: false - User: "" - Volumes: - /volumes/data: {} - WorkingDir: "" - StopSignal: "SIGTERM" - StopTimeout: 10 - Created: "2015-01-06T15:47:31.485331387Z" - Driver: "overlay2" - ExecIDs: - - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" - HostConfig: - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 0 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteIOps: - - {} - ContainerIDFile: "" - CpusetCpus: "" - CpusetMems: "" - CpuPercent: 80 - CpuShares: 0 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - Devices: [] - DeviceRequests: - - Driver: "nvidia" - Count: -1 - DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] - Capabilities: [["gpu", "nvidia", "compute"]] - Options: - property1: "string" - property2: "string" - IpcMode: "" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - OomKillDisable: false - OomScoreAdj: 500 - NetworkMode: "bridge" - PidMode: "" - PortBindings: {} - Privileged: false - ReadonlyRootfs: false - PublishAllPorts: false - RestartPolicy: - MaximumRetryCount: 2 - Name: "on-failure" - LogConfig: - Type: "json-file" - Sysctls: - net.ipv4.ip_forward: "1" - Ulimits: - - {} - VolumeDriver: "" - ShmSize: 67108864 - HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" - HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" - LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" - Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" - Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" - MountLabel: "" - Name: "/boring_euclid" - NetworkSettings: - Bridge: "" - SandboxID: "" - HairpinMode: false - LinkLocalIPv6Address: "" - LinkLocalIPv6PrefixLen: 0 - SandboxKey: "" - EndpointID: "" - Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - IPAddress: "" - IPPrefixLen: 0 - IPv6Gateway: "" - MacAddress: "" - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Path: "/bin/sh" - ProcessLabel: "" - ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" 
- RestartCount: 1 - State: - Error: "" - ExitCode: 9 - FinishedAt: "2015-01-06T15:47:32.080254511Z" - Health: - Status: "healthy" - FailingStreak: 0 - Log: - - Start: "2019-12-22T10:59:05.6385933Z" - End: "2019-12-22T10:59:05.8078452Z" - ExitCode: 0 - Output: "" - OOMKilled: false - Dead: false - Paused: false - Pid: 0 - Restarting: false - Running: true - StartedAt: "2015-01-06T15:47:32.072697474Z" - Status: "running" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" + $ref: "#/definitions/ContainerInspectResponse" 404: description: "no such container" schema: @@ -7463,54 +8139,7 @@ paths: 200: description: "no error" schema: - type: "object" - title: "ContainerTopResponse" - description: "OK response to ContainerTop operation" - properties: - Titles: - description: "The ps column titles" - type: "array" - items: - type: "string" - Processes: - description: | - Each process running in the container, where each is process - is an array of values corresponding to the titles. - type: "array" - items: - type: "array" - items: - type: "string" - examples: - application/json: - Titles: - - "UID" - - "PID" - - "PPID" - - "C" - - "STIME" - - "TTY" - - "TIME" - - "CMD" - Processes: - - - - "root" - - "13642" - - "882" - - "0" - - "17:03" - - "pts/0" - - "00:00:00" - - "/bin/bash" - - - - "root" - - "13735" - - "13642" - - "0" - - "17:06" - - "pts/0" - - "00:00:00" - - "sleep 10" + $ref: "#/definitions/ContainerTopResponse" 404: description: "no such container" schema: @@ -7720,99 +8349,7 @@ paths: 200: description: "no error" schema: - type: "object" - examples: - application/json: - read: "2015-01-08T22:57:31.547920715Z" - pids_stats: - current: 3 - networks: - eth0: - rx_bytes: 5338 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 36 - tx_bytes: 648 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 8 - eth5: - rx_bytes: 4641 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 26 - tx_bytes: 690 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 9 - memory_stats: - stats: - total_pgmajfault: 0 - cache: 0 - mapped_file: 0 - total_inactive_file: 0 - pgpgout: 414 - rss: 6537216 - total_mapped_file: 0 - writeback: 0 - unevictable: 0 - pgpgin: 477 - total_unevictable: 0 - pgmajfault: 0 - total_rss: 6537216 - total_rss_huge: 6291456 - total_writeback: 0 - total_inactive_anon: 0 - rss_huge: 6291456 - hierarchical_memory_limit: 67108864 - total_pgfault: 964 - total_active_file: 0 - active_anon: 6537216 - total_active_anon: 6537216 - total_pgpgout: 414 - total_cache: 0 - inactive_anon: 0 - active_file: 0 - pgfault: 964 - inactive_file: 0 - total_pgpgin: 477 - max_usage: 6651904 - usage: 6537216 - failcnt: 0 - limit: 67108864 - blkio_stats: {} - cpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24472255 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100215355 - usage_in_kernelmode: 30000000 - system_cpu_usage: 739306590000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - precpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24350896 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100093996 - usage_in_kernelmode: 30000000 - system_cpu_usage: 9492140000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 + $ref: "#/definitions/ContainerStatsResponse" 404: description: "no such container" schema: @@ -7876,10 +8413,12 @@ paths: type: "string" - name: "h" in: "query" + required: true description: 
"Height of the TTY session in characters" type: "integer" - name: "w" in: "query" + required: true description: "Width of the TTY session in characters" type: "integer" tags: ["Container"] @@ -8041,14 +8580,7 @@ paths: 200: description: "The container has been updated." schema: - type: "object" - title: "ContainerUpdateResponse" - description: "OK response to ContainerUpdate operation" - properties: - Warnings: - type: "array" - items: - type: "string" + $ref: "#/definitions/ContainerUpdateResponse" 404: description: "no such container" schema: @@ -8955,10 +9487,29 @@ paths: operationId: "BuildPrune" parameters: - name: "keep-storage" + in: "query" + description: | + Amount of disk space in bytes to keep for cache + + > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space". + > It is kept for backward compatibility and will be removed in API v1.49. + type: "integer" + format: "int64" + - name: "reserved-space" in: "query" description: "Amount of disk space in bytes to keep for cache" type: "integer" format: "int64" + - name: "max-used-space" + in: "query" + description: "Maximum amount of disk space allowed to keep for cache" + type: "integer" + format: "int64" + - name: "min-free-space" + in: "query" + description: "Target amount of free disk space after pruning" + type: "integer" + format: "int64" - name: "all" in: "query" type: "boolean" @@ -9025,7 +9576,13 @@ paths: parameters: - name: "fromImage" in: "query" - description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + description: | + Name of the image to pull. If the name includes a tag or digest, specific behavior applies: + + - If only `fromImage` includes a tag, that tag is used. + - If both `fromImage` and `tag` are provided, `tag` takes precedence. + - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. + - If neither a tag nor digest is specified, all tags are pulled. type: "string" - name: "fromSrc" in: "query" @@ -9119,6 +9676,12 @@ paths: description: "Image name or id" type: "string" required: true + - name: "manifests" + in: "query" + description: "Include Manifests in the image summary." + type: "boolean" + default: false + required: false tags: ["Image"] /images/{name}/history: get: @@ -9197,6 +9760,20 @@ paths: description: "Image name or ID" type: "string" required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show the history for. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` tags: ["Image"] /images/{name}/push: post: @@ -9244,6 +9821,19 @@ paths: all tags of the given image that are present in the local image store are pushed. type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. 
If the image is
+ a single-platform image, or if the multi-platform image does not
+ provide a variant matching the given platform, an error is returned.
+
+ Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
- name: "X-Registry-Auth"
in: "header"
description: |
details.
type: "string"
required: true
- - name: "platform"
- in: "query"
- description: "Select a platform-specific manifest to be pushed. OCI platform (JSON encoded)"
- type: "string"
- x-nullable: true
tags: ["Image"]
/images/{name}/tag:
post:
@@ -9553,7 +10138,7 @@
type: "string"
example: "OK"
headers:
- API-Version:
+ Api-Version:
type: "string"
description: "Max API Version the server supports"
Builder-Version:
@@ -9609,7 +10194,7 @@
type: "string"
example: "(empty)"
headers:
- API-Version:
+ Api-Version:
type: "string"
description: "Max API Version the server supports"
Builder-Version:
@@ -9648,7 +10233,7 @@
201:
description: "no error"
schema:
- $ref: "#/definitions/IdResponse"
+ $ref: "#/definitions/IDResponse"
404:
description: "no such container"
schema:
@@ -9942,7 +10527,16 @@
description: "Image name or ID"
type: "string"
required: true
- tags: ["Image"]
+ - name: "platform"
+ type: "string"
+ in: "query"
+ description: |
+ JSON encoded OCI platform describing a platform which will be used
+ to select a platform-specific image to be saved if the image is
+ multi-platform.
+ If not provided, the full multi-platform image will be saved.
+
+ Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
/images/get:
get:
summary: "Export several images"
@@ -10009,6 +10603,16 @@
description: "Suppress progress details during load."
type: "boolean"
default: false
+ - name: "platform"
+ type: "string"
+ in: "query"
+ description: |
+ JSON encoded OCI platform describing a platform which will be used
+ to select a platform-specific image to be loaded if the image is
+ multi-platform.
+ If not provided, the full multi-platform image will be loaded.
+
+ Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/containers/{id}/exec:
post:
@@ -10023,7 +10627,7 @@
201:
description: "no error"
schema:
- $ref: "#/definitions/IdResponse"
+ $ref: "#/definitions/IDResponse"
404:
description: "no such container"
schema:
@@ -10065,6 +10669,7 @@
items:
type: "integer"
minimum: 0
+ example: [80, 64]
DetachKeys:
type: "string"
description: |
@@ -10151,9 +10756,11 @@
Detach:
type: "boolean"
description: "Detach from the command."
+ example: false
Tty:
type: "boolean"
description: "Allocate a pseudo-TTY."
+ example: true
ConsoleSize:
type: "array"
description: "Initial console size, as an `[height, width]` array."
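As an illustrative aside to the exec-related hunks above (the shared `IDResponse` schema and the corrected `ConsoleSize` example): the sketch below drives `POST /containers/{id}/exec` and `POST /exec/{id}/start` through the vendored Go client. This is a minimal sketch, not part of the vendored diff; it assumes the `container.ExecOptions` and `container.ExecStartOptions` structs and the client methods of this docker/docker revision, and the container name is a placeholder.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()

	// Build a client from the environment (DOCKER_HOST etc.) and negotiate
	// the API version with the daemon.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// POST /containers/{id}/exec: the response body is the shared IDResponse
	// ("Response to an API call that returns just an Id").
	// "my-container" is a hypothetical container name.
	created, err := cli.ContainerExecCreate(ctx, "my-container", container.ExecOptions{
		Cmd:          []string{"echo", "hello"},
		AttachStdout: true,
		Tty:          true,
		ConsoleSize:  &[2]uint{80, 64}, // [height, width], as in the swagger example
	})
	if err != nil {
		panic(err)
	}

	// POST /exec/{id}/start: Detach and Tty mirror the ExecStartConfig body.
	if err := cli.ContainerExecStart(ctx, created.ID, container.ExecStartOptions{
		Detach: true,
		Tty:    true,
	}); err != nil {
		panic(err)
	}
	fmt.Println("started exec", created.ID)
}
```

Starting the exec detached means no streams are hijacked; to capture output one would attach via the client's `ContainerExecAttach` instead, which returns a hijacked connection.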
@@ -10163,10 +10770,7 @@ paths: items: type: "integer" minimum: 0 - example: - Detach: false - Tty: true - ConsoleSize: [80, 64] + example: [80, 64] - name: "id" in: "path" description: "Exec instance ID" @@ -10203,10 +10807,12 @@ paths: type: "string" - name: "h" in: "query" + required: true description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" + required: true description: "Width of the TTY session in characters" type: "integer" tags: ["Exec"] @@ -10520,6 +11126,7 @@ paths: Created: "2016-10-19T06:21:00.416543526Z" Scope: "local" Driver: "bridge" + EnableIPv4: true EnableIPv6: false Internal: false Attachable: false @@ -10541,6 +11148,7 @@ paths: Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "null" + EnableIPv4: false EnableIPv6: false Internal: false Attachable: false @@ -10555,6 +11163,7 @@ paths: Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "host" + EnableIPv4: false EnableIPv6: false Internal: false Attachable: false @@ -10740,6 +11349,10 @@ paths: IPAM: description: "Optional custom IP scheme for the network." $ref: "#/definitions/IPAM" + EnableIPv4: + description: "Enable IPv4 on the network." + type: "boolean" + example: true EnableIPv6: description: "Enable IPv6 on the network." type: "boolean" @@ -10817,6 +11430,7 @@ paths: IPv4Address: "172.24.56.89" IPv6Address: "2001:db8::5689" MacAddress: "02:42:ac:12:05:02" + Priority: 100 tags: ["Network"] /networks/{id}/disconnect: @@ -11622,6 +12236,7 @@ paths: example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" + DataPathAddr: "192.168.1.1" RemoteAddrs: - "node1:2377" JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" @@ -12492,7 +13107,7 @@ paths: 201: description: "no error" schema: - $ref: "#/definitions/IdResponse" + $ref: "#/definitions/IDResponse" 409: description: "name conflicts with an existing object" schema: @@ -12699,7 +13314,7 @@ paths: 201: description: "no error" schema: - $ref: "#/definitions/IdResponse" + $ref: "#/definitions/IDResponse" 409: description: "name conflicts with an existing object" schema: diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index df791f02a0..dce8260f32 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -11,7 +11,7 @@ import ( "github.com/docker/docker/api/types/registry" ) -// NewHijackedResponse intializes a HijackedResponse type +// NewHijackedResponse initializes a [HijackedResponse] type. func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} } @@ -129,14 +129,6 @@ type ImageBuildResponse struct { OSType string } -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func(context.Context) (string, error) - // NodeListOptions holds parameters to list nodes with. type NodeListOptions struct { Filters filters.Args @@ -235,11 +227,18 @@ type PluginDisableOptions struct { // PluginInstallOptions holds parameters to install a plugin. 
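The resize hunks above make the "h" and "w" query parameters required. A hedged sketch of the matching Go client call, assuming the ContainerExecResize signature and container.ResizeOptions as vendored here; execID is a placeholder:

package example

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// resizeExec resizes a running exec's TTY. The client always sends both
// dimensions, which map to the now-required "h" and "w" query parameters.
func resizeExec(ctx context.Context, cli *client.Client, execID string) error {
	return cli.ContainerExecResize(ctx, execID, container.ResizeOptions{
		Height: 40,  // "h" query parameter
		Width:  120, // "w" query parameter
	})
}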
type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - RemoteRef string // RemoteRef is the plugin name on the registry - PrivilegeFunc RequestPrivilegeFunc + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) AcceptPermissionsFunc func(context.Context, PluginPrivileges) (bool, error) Args []string } diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/common/id_response.go similarity index 87% rename from vendor/github.com/docker/docker/api/types/id_response.go rename to vendor/github.com/docker/docker/api/types/common/id_response.go index 7592d2f8b1..22e8c60a48 100644 --- a/vendor/github.com/docker/docker/api/types/id_response.go +++ b/vendor/github.com/docker/docker/api/types/common/id_response.go @@ -1,10 +1,10 @@ -package types +package common // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // IDResponse Response to an API call that returns just an Id -// swagger:model IdResponse +// swagger:model IDResponse type IDResponse struct { // The id of the newly created object. diff --git a/vendor/github.com/docker/docker/api/types/container/commit.go b/vendor/github.com/docker/docker/api/types/container/commit.go new file mode 100644 index 0000000000..6fd1b0ead1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/commit.go @@ -0,0 +1,7 @@ +package container + +import "github.com/docker/docker/api/types/common" + +// CommitResponse response for the commit API call, containing the ID of the +// image that was produced. +type CommitResponse = common.IDResponse diff --git a/vendor/github.com/docker/docker/api/types/container/container.go b/vendor/github.com/docker/docker/api/types/container/container.go index 711af12c99..65fabbf425 100644 --- a/vendor/github.com/docker/docker/api/types/container/container.go +++ b/vendor/github.com/docker/docker/api/types/container/container.go @@ -4,8 +4,22 @@ import ( "io" "os" "time" + + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/storage" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) +// ContainerUpdateOKBody OK response to ContainerUpdate operation +// +// Deprecated: use [UpdateResponse]. This alias will be removed in the next release. +type ContainerUpdateOKBody = UpdateResponse + +// ContainerTopOKBody OK response to ContainerTop operation +// +// Deprecated: use [TopResponse]. This alias will be removed in the next release. +type ContainerTopOKBody = TopResponse + // PruneReport contains the response for Engine API: // POST "/containers/prune" type PruneReport struct { @@ -42,3 +56,133 @@ type StatsResponseReader struct { Body io.ReadCloser `json:"body"` OSType string `json:"ostype"` } + +// MountPoint represents a mount point configuration inside the container. 
+// This is used for reporting the mountpoints in use by a container. +type MountPoint struct { + // Type is the type of mount, see `Type` definitions in + // github.com/docker/docker/api/types/mount.Type + Type mount.Type `json:",omitempty"` + + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name. + Name string `json:",omitempty"` + + // Source is the source location of the mount. + // + // For volumes, this contains the storage location of the volume (within + // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + // the source (host) part of the bind-mount. For `tmpfs` mount points, this + // field is empty. + Source string + + // Destination is the path relative to the container root (`/`) where the + // Source is mounted inside the container. + Destination string + + // Driver is the volume driver used to create the volume (if it is a volume). + Driver string `json:",omitempty"` + + // Mode is a comma separated list of options supplied by the user when + // creating the bind/volume mount. + // + // The default is platform-specific (`"z"` on Linux, empty on Windows). + Mode string + + // RW indicates whether the mount is mounted writable (read-write). + RW bool + + // Propagation describes how mounts are propagated from the host into the + // mount point, and vice-versa. Refer to the Linux kernel documentation + // for details: + // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // + // This field is not used on Windows. + Propagation mount.Propagation +} + +// State stores container's running state +// it's part of ContainerJSONBase and returned by "inspect" command +type State struct { + Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// Summary contains response of Engine API: +// GET "/containers/json" +type Summary struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"` + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + Annotations map[string]string `json:",omitempty"` + } + NetworkSettings *NetworkSettingsSummary + Mounts []MountPoint +} + +// ContainerJSONBase contains response of Engine API GET "/containers/{name:.*}/json" +// for API version 1.18 and older. +// +// TODO(thaJeztah): combine ContainerJSONBase and InspectResponse into a single struct. +// The split between ContainerJSONBase (ContainerJSONBase) and InspectResponse (InspectResponse) +// was done in commit 6deaa58ba5f051039643cedceee97c8695e2af74 (https://github.com/moby/moby/pull/13675). +// ContainerJSONBase contained all fields for API < 1.19, and InspectResponse +// held fields that were added in API 1.19 and up. Given that the minimum +// supported API version is now 1.24, we no longer use the separate type. 
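The Summary and MountPoint types above back the GET "/containers/json" response. A minimal sketch of consuming them through the Go client, assuming the ContainerList signature and container.ListOptions as vendored here (types.Container remains a deprecated alias of container.Summary per the aliases later in this diff):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Each list entry is a container.Summary, including the Mounts slice
	// of MountPoint values shown above.
	containers, err := cli.ContainerList(context.Background(), container.ListOptions{All: true})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Printf("%s %s (%s)\n", c.ID, c.Image, c.State)
		for _, m := range c.Mounts {
			fmt.Printf("  %s -> %s (rw=%v)\n", m.Source, m.Destination, m.RW)
		}
	}
}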
+type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *State + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Name string + RestartCount int + Driver string + Platform string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *HostConfig + GraphDriver storage.DriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// InspectResponse is the response for the GET "/containers/{name:.*}/json" +// endpoint. +type InspectResponse struct { + *ContainerJSONBase + Mounts []MountPoint + Config *Config + NetworkSettings *NetworkSettings + // ImageManifestDescriptor is the descriptor of a platform-specific manifest of the image used to create the container. + ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go deleted file mode 100644 index 63381da367..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_top.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerTopOKBody OK response to ContainerTop operation -// swagger:model ContainerTopOKBody -type ContainerTopOKBody struct { - - // Each process running in the container, where each is process - // is an array of values corresponding to the titles. - // - // Required: true - Processes [][]string `json:"Processes"` - - // The ps column titles - // Required: true - Titles []string `json:"Titles"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go deleted file mode 100644 index c10f175ea8..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_update.go +++ /dev/null @@ -1,16 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerUpdateOKBody OK response to ContainerUpdate operation -// swagger:model ContainerUpdateOKBody -type ContainerUpdateOKBody struct { - - // warnings - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/exec.go b/vendor/github.com/docker/docker/api/types/container/exec.go index 96093eb5cd..f4b22376ef 100644 --- a/vendor/github.com/docker/docker/api/types/container/exec.go +++ b/vendor/github.com/docker/docker/api/types/container/exec.go @@ -1,5 +1,13 @@ package container +import "github.com/docker/docker/api/types/common" + +// ExecCreateResponse is the response for a successful exec-create request. +// It holds the ID of the exec that was created. +// +// TODO(thaJeztah): make this a distinct type. 
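Exec creation now returns the common.IDResponse-based ExecCreateResponse alias, matching the IdResponse to IDResponse rename on the swagger side. A short, hedged sketch; containerID and the command are placeholders:

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// createExec creates an exec instance and returns its ID. The response type
// carries only the ID of the newly created exec.
func createExec(ctx context.Context, cli *client.Client, containerID string) (string, error) {
	resp, err := cli.ContainerExecCreate(ctx, containerID, container.ExecOptions{
		Cmd:          []string{"echo", "hello"},
		AttachStdout: true,
	})
	if err != nil {
		return "", err
	}
	fmt.Println("created exec:", resp.ID)
	return resp.ID, nil
}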
+type ExecCreateResponse = common.IDResponse + // ExecOptions is a small subset of the Config struct that holds the configuration // for the exec feature of docker. type ExecOptions struct { diff --git a/vendor/github.com/docker/docker/api/types/container/health.go b/vendor/github.com/docker/docker/api/types/container/health.go new file mode 100644 index 0000000000..93663746f6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/health.go @@ -0,0 +1,26 @@ +package container + +import "time" + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of [Starting], [Healthy] or [Unhealthy]. + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go index 03648fb7b5..83198305e7 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/strslice" "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" + "github.com/docker/go-units" ) // CgroupnsMode represents the cgroup namespace mode of the container diff --git a/vendor/github.com/docker/docker/api/types/container/network_settings.go b/vendor/github.com/docker/docker/api/types/container/network_settings.go new file mode 100644 index 0000000000..afec0e5432 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/network_settings.go @@ -0,0 +1,56 @@ +package container + +import ( + "github.com/docker/docker/api/types/network" + "github.com/docker/go-connections/nat" +) + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds networking state for a container when inspecting it. +type NetworkSettingsBase struct { + Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag. + SandboxID string // SandboxID uniquely represents a container's network stack + SandboxKey string // SandboxKey identifies the sandbox + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + + // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + // + // Deprecated: This field is never set and will be removed in a future release. 
+ HairpinMode bool + // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + // + // Deprecated: This field is never set and will be removed in a future release. + LinkLocalIPv6Address string + // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + // + // Deprecated: This field is never set and will be removed in a future release. + LinkLocalIPv6PrefixLen int + SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release. + SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release. +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// NetworkSettingsSummary provides a summary of container's networks +// in /containers/json +type NetworkSettingsSummary struct { + Networks map[string]*network.EndpointSettings +} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/container/port.go similarity index 96% rename from vendor/github.com/docker/docker/api/types/port.go rename to vendor/github.com/docker/docker/api/types/container/port.go index d91234744c..895043cfe9 100644 --- a/vendor/github.com/docker/docker/api/types/port.go +++ b/vendor/github.com/docker/docker/api/types/container/port.go @@ -1,4 +1,4 @@ -package types +package container // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command diff --git a/vendor/github.com/docker/docker/api/types/container/stats.go b/vendor/github.com/docker/docker/api/types/container/stats.go index 3b3fb131a2..3bfeb4849f 100644 --- a/vendor/github.com/docker/docker/api/types/container/stats.go +++ b/vendor/github.com/docker/docker/api/types/container/stats.go @@ -148,7 +148,15 @@ type PidsStats struct { } // Stats is Ultimate struct aggregating all types of stats of one container -type Stats struct { +// +// Deprecated: use [StatsResponse] instead. This type will be removed in the next release. +type Stats = StatsResponse + +// StatsResponse aggregates all types of stats of one container. +type StatsResponse struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + // Common stats Read time.Time `json:"read"` PreRead time.Time `json:"preread"` @@ -162,20 +170,8 @@ type Stats struct { StorageStats StorageStats `json:"storage_stats,omitempty"` // Shared stats - CPUStats CPUStats `json:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" - MemoryStats MemoryStats `json:"memory_stats,omitempty"` -} - -// StatsResponse is newly used Networks. -// -// TODO(thaJeztah): unify with [Stats]. 
This wrapper was to account for pre-api v1.21 changes, see https://github.com/moby/moby/commit/d3379946ec96fb6163cb8c4517d7d5a067045801 -type StatsResponse struct { - Stats - - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` + CPUStats CPUStats `json:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + Networks map[string]NetworkStats `json:"networks,omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/container/top_response.go b/vendor/github.com/docker/docker/api/types/container/top_response.go new file mode 100644 index 0000000000..b4bae5ef03 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/top_response.go @@ -0,0 +1,18 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// TopResponse ContainerTopResponse +// +// Container "top" response. +// swagger:model TopResponse +type TopResponse struct { + + // Each process running in the container, where each process + // is an array of values corresponding to the titles. + Processes [][]string `json:"Processes"` + + // The ps column titles + Titles []string `json:"Titles"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/update_response.go b/vendor/github.com/docker/docker/api/types/container/update_response.go new file mode 100644 index 0000000000..e2b5bf5ac0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/update_response.go @@ -0,0 +1,14 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// UpdateResponse ContainerUpdateResponse +// +// Response for a successful container-update. +// swagger:model UpdateResponse +type UpdateResponse struct { + + // Warnings encountered when updating the container. + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/filters/errors.go b/vendor/github.com/docker/docker/api/types/filters/errors.go index f52f694408..b8a690d67a 100644 --- a/vendor/github.com/docker/docker/api/types/filters/errors.go +++ b/vendor/github.com/docker/docker/api/types/filters/errors.go @@ -22,16 +22,3 @@ func (e invalidFilter) Error() string { // InvalidParameter marks this error as ErrInvalidParameter func (e invalidFilter) InvalidParameter() {} - -// unreachableCode is an error indicating that the code path was not expected to be reached. -type unreachableCode struct { - Filter string - Value []string -} - -// System marks this error as ErrSystem -func (e unreachableCode) System() {} - -func (e unreachableCode) Error() string { - return fmt.Sprintf("unreachable code reached for filter: %q with values: %s", e.Filter, e.Value) -} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 0914b2a441..2085ff38f2 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -200,7 +200,6 @@ func (args Args) Match(field, source string) bool { // Error is not nil only if the filter values are not valid boolean or are conflicting. 
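With Stats folded into the flattened StatsResponse above (Name, ID, and Networks now live directly on the response, and container.Stats is a deprecated alias), a one-shot stats call decodes into a single struct. A hedged sketch, assuming the ContainerStats signature as vendored here; containerID is a placeholder:

package example

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// printStats fetches a single (non-streaming) stats sample and decodes it
// into container.StatsResponse.
func printStats(ctx context.Context, cli *client.Client, containerID string) error {
	reader, err := cli.ContainerStats(ctx, containerID, false)
	if err != nil {
		return err
	}
	defer reader.Body.Close()

	var stats container.StatsResponse
	if err := json.NewDecoder(reader.Body).Decode(&stats); err != nil {
		return err
	}
	fmt.Printf("%s: cpu=%d mem=%d networks=%d\n",
		stats.Name, stats.CPUStats.CPUUsage.TotalUsage,
		stats.MemoryStats.Usage, len(stats.Networks))
	return nil
}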
func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { fieldValues, ok := args.fields[key] - if !ok { return defaultValue, nil } @@ -211,20 +210,11 @@ func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { isFalse := fieldValues["0"] || fieldValues["false"] isTrue := fieldValues["1"] || fieldValues["true"] - - conflicting := isFalse && isTrue - invalid := !isFalse && !isTrue - - if conflicting || invalid { + if isFalse == isTrue { + // Either no or conflicting truthy/falsy value were provided return defaultValue, &invalidFilter{key, args.Get(key)} - } else if isFalse { - return false, nil - } else if isTrue { - return true, nil } - - // This code shouldn't be reached. - return defaultValue, &unreachableCode{Filter: key, Value: args.Get(key)} + return isTrue, nil } // ExactMatch returns true if the source matches exactly one of the values. diff --git a/vendor/github.com/docker/docker/api/types/image/image_inspect.go b/vendor/github.com/docker/docker/api/types/image/image_inspect.go new file mode 100644 index 0000000000..78e81f052c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/image_inspect.go @@ -0,0 +1,140 @@ +package image + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/storage" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string `json:",omitempty"` + Layers []string `json:",omitempty"` +} + +// InspectResponse contains response of Engine API: +// GET "/images/{name:.*}/json" +type InspectResponse struct { + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. + ID string `json:"Id"` + + // RepoTags is a list of image names/tags in the local image cache that + // reference this image. + // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged", in which case it can still be referenced by its ID. + RepoTags []string + + // RepoDigests is a list of content-addressable digests of locally available + // image manifests that the image is referenced from. Multiple manifests can + // refer to the same image. + // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + RepoDigests []string + + // Parent is the ID of the parent image. + // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + Parent string + + // Comment is an optional message that can be set when committing or + // importing the image. + Comment string + + // Created is the date and time at which the image was created, formatted in + // RFC 3339 nano-seconds (time.RFC3339Nano). + // + // This information is only available if present in the image, + // and omitted otherwise. 
+ Created string `json:",omitempty"` + + // Container is the ID of the container that was used to create the image. + // + // Depending on how the image was created, this field may be empty. + // + // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. + Container string `json:",omitempty"` + + // ContainerConfig is an optional field containing the configuration of the + // container that was last committed when creating the image. + // + // Previous versions of Docker builder used this field to store build cache, + // and it is not in active use anymore. + // + // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. + ContainerConfig *container.Config `json:",omitempty"` + + // DockerVersion is the version of Docker that was used to build the image. + // + // Depending on how the image was created, this field may be empty. + DockerVersion string + + // Author is the name of the author that was specified when committing the + // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. + Author string + Config *container.Config + + // Architecture is the hardware CPU architecture that the image runs on. + Architecture string + + // Variant is the CPU architecture variant (presently ARM-only). + Variant string `json:",omitempty"` + + // OS is the Operating System the image is built to run on. + Os string + + // OsVersion is the version of the Operating System the image is built to + // run on (especially for Windows). + OsVersion string `json:",omitempty"` + + // Size is the total size of the image including all layers it is composed of. + Size int64 + + // VirtualSize is the total size of the image including all layers it is + // composed of. + // + // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. + VirtualSize int64 `json:"VirtualSize,omitempty"` + + // GraphDriver holds information about the storage driver used to store the + // container's and image's filesystem. + GraphDriver storage.DriverData + + // RootFS contains information about the image's RootFS, including the + // layer IDs. + RootFS RootFS + + // Metadata of the image in the local cache. + // + // This information is local to the daemon, and not part of the image itself. + Metadata Metadata + + // Descriptor is the OCI descriptor of the image target. + // It's only set if the daemon provides a multi-platform image store. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"` + + // Manifests is a list of image manifests available in this image. It + // provides a more detailed view of the platform-specific image manifests or + // other image-attached data like build attestations. + // + // Only available if the daemon provides a multi-platform image store. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Manifests []ManifestSummary `json:"Manifests,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go index 923ebe5a06..919510fe37 100644 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ b/vendor/github.com/docker/docker/api/types/image/opts.go @@ -38,7 +38,7 @@ type PullOptions struct { // authentication header value in base64 encoded format, or an error if the // privilege request fails. 
// - // Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc]. + // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. PrivilegeFunc func(context.Context) (string, error) Platform string } @@ -53,7 +53,7 @@ type PushOptions struct { // authentication header value in base64 encoded format, or an error if the // privilege request fails. // - // Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc]. + // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. PrivilegeFunc func(context.Context) (string, error) // Platform is an optional field that selects a specific platform to push @@ -86,3 +86,31 @@ type RemoveOptions struct { Force bool PruneChildren bool } + +// HistoryOptions holds parameters to get image history. +type HistoryOptions struct { + // Platform from the manifest list to use for history. + Platform *ocispec.Platform +} + +// LoadOptions holds parameters to load images. +type LoadOptions struct { + // Quiet suppresses progress output + Quiet bool + + // Platforms selects the platforms to load if the image is a + // multi-platform image and has multiple variants. + Platforms []ocispec.Platform +} + +type InspectOptions struct { + // Manifests returns the image manifests. + Manifests bool +} + +// SaveOptions holds parameters to save images. +type SaveOptions struct { + // Platforms selects the platforms to save if the image is a + // multi-platform image and has multiple variants. + Platforms []ocispec.Platform +} diff --git a/vendor/github.com/docker/docker/api/types/image/summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go index e87e216a28..c5ae6ab9ca 100644 --- a/vendor/github.com/docker/docker/api/types/image/summary.go +++ b/vendor/github.com/docker/docker/api/types/image/summary.go @@ -1,5 +1,7 @@ package image +import ocispec "github.com/opencontainers/image-spec/specs-go/v1" + type Summary struct { // Number of containers using this image. Includes both stopped and running @@ -42,6 +44,13 @@ type Summary struct { // Required: true ParentID string `json:"ParentId"` + // Descriptor is the OCI descriptor of the image target. + // It's only set if the daemon provides a multi-platform image store. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"` + // Manifests is a list of image manifests available in this image. It // provides a more detailed view of the platform-specific image manifests or // other image-attached data like build attestations. diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index c68dcf65bd..d98dbec991 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -19,6 +19,8 @@ const ( TypeNamedPipe Type = "npipe" // TypeCluster is the type for Swarm Cluster Volumes. TypeCluster Type = "cluster" + // TypeImage is the type for mounting another image's filesystem + TypeImage Type = "image" ) // Mount represents a mount (volume). 
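The new TypeImage mount type, together with the ImageOptions struct in the hunk below, mounts another image's filesystem into a container. A hedged sketch of building such a mount; the image reference, target path, and Subpath value are placeholders, and Subpath is assumed to be a path relative to the image root:

package example

import (
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
)

// imageMount builds a HostConfig carrying a TypeImage mount that exposes
// part of another image's filesystem inside the container.
func imageMount() *container.HostConfig {
	return &container.HostConfig{
		Mounts: []mount.Mount{{
			Type:   mount.TypeImage,
			Source: "alpine:latest", // the image whose filesystem is mounted
			Target: "/mnt/alpine",
			ImageOptions: &mount.ImageOptions{
				Subpath: "etc", // expose only the image's /etc
			},
		}},
	}
}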
@@ -34,6 +36,7 @@ type Mount struct { BindOptions *BindOptions `json:",omitempty"` VolumeOptions *VolumeOptions `json:",omitempty"` + ImageOptions *ImageOptions `json:",omitempty"` TmpfsOptions *TmpfsOptions `json:",omitempty"` ClusterOptions *ClusterOptions `json:",omitempty"` } @@ -100,6 +103,10 @@ type VolumeOptions struct { DriverConfig *Driver `json:",omitempty"` } +type ImageOptions struct { + Subpath string `json:",omitempty"` +} + // Driver represents a volume driver. type Driver struct { Name string `json:",omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/network/endpoint.go b/vendor/github.com/docker/docker/api/types/network/endpoint.go index 0fbb40b351..167ac70ab5 100644 --- a/vendor/github.com/docker/docker/api/types/network/endpoint.go +++ b/vendor/github.com/docker/docker/api/types/network/endpoint.go @@ -19,6 +19,12 @@ type EndpointSettings struct { // generated address). MacAddress string DriverOpts map[string]string + + // GwPriority determines which endpoint will provide the default gateway + // for the container. The endpoint with the highest priority will be used. + // If multiple endpoints have the same priority, they are lexicographically + // sorted based on their network name, and the one that sorts first is picked. + GwPriority int // Operational data NetworkID string EndpointID string diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index c8db97a7e6..d34b8ab724 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -33,6 +33,7 @@ type CreateRequest struct { type CreateOptions struct { Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`) Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level). + EnableIPv4 *bool `json:",omitempty"` // EnableIPv4 represents whether to enable IPv4. EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6. IPAM *IPAM // IPAM is the network's IP Address Management. Internal bool // Internal represents if the network is used internal only. @@ -76,7 +77,8 @@ type Inspect struct { Created time.Time // Created is the time the network created Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + EnableIPv4 bool // EnableIPv4 represents whether IPv4 is enabled + EnableIPv6 bool // EnableIPv6 represents whether IPv6 is enabled IPAM IPAM // IPAM is the network's IP Address Management Internal bool // Internal represents if the network is used internal only Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. 
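Both network additions above, EnableIPv4 on create and GwPriority on endpoint settings, are driven from the Go client. A hedged sketch, assuming the NetworkCreate/NetworkConnect signatures as vendored here; the network name and containerID are placeholders:

package example

import (
	"context"

	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

// createAndConnect creates a bridge network with IPv4 explicitly enabled,
// then connects a container with a high GwPriority so this endpoint wins
// default-gateway selection.
func createAndConnect(ctx context.Context, cli *client.Client, containerID string) error {
	enableIPv4 := true
	nw, err := cli.NetworkCreate(ctx, "app-net", network.CreateOptions{
		Driver:     "bridge",
		EnableIPv4: &enableIPv4, // pointer field; nil leaves the daemon default
	})
	if err != nil {
		return err
	}
	return cli.NetworkConnect(ctx, nw.ID, containerID, &network.EndpointSettings{
		GwPriority: 100, // highest-priority endpoint provides the default gateway
	})
}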
diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go index 8e383f6e60..ebd5e4b9e2 100644 --- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go +++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go @@ -1,17 +1,29 @@ package registry // import "github.com/docker/docker/api/types/registry" import ( + "context" "encoding/base64" "encoding/json" + "fmt" "io" "strings" - - "github.com/pkg/errors" ) // AuthHeader is the name of the header used to send encoded registry // authorization credentials for registry operations (push/pull). const AuthHeader = "X-Registry-Auth" +// RequestAuthConfig is a function interface that clients can supply +// to retry operations after getting an authorization error. +// +// The function must return the [AuthHeader] value ([AuthConfig]), encoded +// in base64url format ([RFC4648, section 5]), which can be decoded by +// [DecodeAuthConfig]. +// +// It must return an error if the privilege request fails. +// +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 +type RequestAuthConfig func(context.Context) (string, error) + // AuthConfig contains authorization information for connecting to a Registry. type AuthConfig struct { Username string `json:"username,omitempty"` @@ -85,7 +97,7 @@ func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) { } func invalid(err error) error { - return errInvalidParameter{errors.Wrap(err, "invalid X-Registry-Auth header")} + return errInvalidParameter{fmt.Errorf("invalid X-Registry-Auth header: %w", err)} } type errInvalidParameter struct{ error } diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 75ee07b15f..8117cb09e7 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -9,11 +9,29 @@ import ( // ServiceConfig stores daemon registry services configuration. type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet - AllowNondistributableArtifactsHostnames []string - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string + AllowNondistributableArtifactsCIDRs []*NetIPNet `json:"AllowNondistributableArtifactsCIDRs,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release. + AllowNondistributableArtifactsHostnames []string `json:"AllowNondistributableArtifactsHostnames,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release. + + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// MarshalJSON implements a custom marshaler to include legacy fields +// in API responses. 
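The RequestAuthConfig type introduced in the authconfig.go hunk documents the shape of the PrivilegeFunc fields referenced throughout the options structs in this diff. A hedged sketch of one such callback, using registry.EncodeAuthConfig to produce the base64url X-Registry-Auth value; the credentials and image reference are placeholders:

package example

import (
	"context"
	"io"

	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/client"
)

// pullWithAuthRetry wires a RequestAuthConfig-shaped callback into
// PullOptions.PrivilegeFunc; the client invokes it after an authorization
// error to obtain a fresh encoded auth header and retry the pull.
func pullWithAuthRetry(ctx context.Context, cli *client.Client) error {
	privilegeFn := func(ctx context.Context) (string, error) {
		// Refresh or re-prompt for credentials here; placeholders below.
		return registry.EncodeAuthConfig(registry.AuthConfig{
			Username: "someuser",
			Password: "somesecret",
		})
	}
	rc, err := cli.ImagePull(ctx, "registry.example.com/private/app:latest", image.PullOptions{
		PrivilegeFunc: privilegeFn,
	})
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(io.Discard, rc) // drain the progress stream
	return err
}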
+func (sc ServiceConfig) MarshalJSON() ([]byte, error) { + tmp := map[string]interface{}{ + "InsecureRegistryCIDRs": sc.InsecureRegistryCIDRs, + "IndexConfigs": sc.IndexConfigs, + "Mirrors": sc.Mirrors, + } + if sc.AllowNondistributableArtifactsCIDRs != nil { + tmp["AllowNondistributableArtifactsCIDRs"] = nil + } + if sc.AllowNondistributableArtifactsHostnames != nil { + tmp["AllowNondistributableArtifactsHostnames"] = nil + } + return json.Marshal(tmp) } // NetIPNet is the net.IPNet type, which can be marshalled and @@ -31,15 +49,17 @@ func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { } // UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) error { var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } + if err := json.Unmarshal(b, &ipnetStr); err != nil { + return err + } + _, cidr, err := net.ParseCIDR(ipnetStr) + if err != nil { + return err } - return + *ipnet = NetIPNet(*cidr) + return nil } // IndexInfo contains information about a registry diff --git a/vendor/github.com/docker/docker/api/types/registry/search.go b/vendor/github.com/docker/docker/api/types/registry/search.go index a0a1eec544..994ca4c6f9 100644 --- a/vendor/github.com/docker/docker/api/types/registry/search.go +++ b/vendor/github.com/docker/docker/api/types/registry/search.go @@ -10,11 +10,12 @@ import ( type SearchOptions struct { RegistryAuth string - // PrivilegeFunc is a [types.RequestPrivilegeFunc] the client can - // supply to retry operations after getting an authorization error. + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. // - // It must return the registry authentication header value in base64 - // format, or an error if the privilege request fails. + // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. PrivilegeFunc func(context.Context) (string, error) Filters filters.Args Limit int diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/storage/driver_data.go similarity index 75% rename from vendor/github.com/docker/docker/api/types/graph_driver_data.go rename to vendor/github.com/docker/docker/api/types/storage/driver_data.go index ce3deb331c..009e213095 100644 --- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ b/vendor/github.com/docker/docker/api/types/storage/driver_data.go @@ -1,13 +1,13 @@ -package types +package storage // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// GraphDriverData Information about the storage driver used to store the container's and +// DriverData Information about the storage driver used to store the container's and // image's filesystem. // -// swagger:model GraphDriverData -type GraphDriverData struct { +// swagger:model DriverData +type DriverData struct { // Low-level storage metadata, provided as key/value pairs. 
// diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go index 16202ccce6..f9a65187ff 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ b/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -12,6 +12,12 @@ type Config struct { // ConfigSpec represents a config specification from a config in swarm type ConfigSpec struct { Annotations + + // Data is the data to store as a config. + // + // The maximum allowed size is 1000KB, as defined in [MaxConfigSize]. + // + // [MaxConfigSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize Data []byte `json:",omitempty"` // Templating controls whether and how to evaluate the config payload as diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go index d5213ec981..aeb5bb54ad 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -12,8 +12,22 @@ type Secret struct { // SecretSpec represents a secret specification from a secret in swarm type SecretSpec struct { Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Data is the data to store as a secret. It must be empty if a + // [Driver] is used, in which case the data is loaded from an external + // secret store. The maximum allowed size is 500KB, as defined in + // [MaxSecretSize]. + // + // This field is only used to create the secret, and is not returned + // by other endpoints. + // + // [MaxSecretSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize + Data []byte `json:",omitempty"` + + // Driver is the name of the secrets driver used to fetch the secret's + // value from an external secret store. If not set, the default built-in + // store is used. + Driver *Driver `json:",omitempty"` // Templating controls whether and how to evaluate the secret payload as // a template. If it is not set, no templating is used. diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go index c66a2afb8b..8a2444da28 100644 --- a/vendor/github.com/docker/docker/api/types/system/info.go +++ b/vendor/github.com/docker/docker/api/types/system/info.go @@ -29,8 +29,8 @@ type Info struct { CPUSet bool PidsLimit bool IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + BridgeNfIptables bool `json:"BridgeNfIptables"` // Deprecated: netfilter module is now loaded on-demand and no longer during daemon startup, making this field obsolete. This field is always false and will be removed in the next release. + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` // Deprecated: netfilter module is now loaded on-demand and no longer during daemon startup, making this field obsolete. This field is always false and will be removed in the next release. Debug bool NFd int OomKillDisable bool @@ -137,8 +137,13 @@ type PluginsInfo struct { // Commit holds the Git-commit (SHA1) that a binary was built from, as reported // in the version-string of external tools, such as containerd, or runC. type Commit struct { - ID string // ID is the actual commit ID of external tool. 
- Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. + // ID is the actual commit ID or version of external tool. + ID string + + // Expected is the commit ID of external tool expected by dockerd as set at build time. + // + // Deprecated: this field is no longer used in API v1.49, but kept for backward-compatibility with older API versions. + Expected string } // NetworkAddressPool is a temp struct used by [Info] struct. diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index fe99b74392..82ae339c31 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -6,11 +6,8 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/volume" - "github.com/docker/go-connections/nat" ) const ( @@ -21,145 +18,6 @@ const ( MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" ) -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string `json:",omitempty"` - Layers []string `json:",omitempty"` -} - -// ImageInspect contains response of Engine API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - // ID is the content-addressable ID of an image. - // - // This identifier is a content-addressable digest calculated from the - // image's configuration (which includes the digests of layers used by - // the image). - // - // Note that this digest differs from the `RepoDigests` below, which - // holds digests of image manifests that reference the image. - ID string `json:"Id"` - - // RepoTags is a list of image names/tags in the local image cache that - // reference this image. - // - // Multiple image tags can refer to the same image, and this list may be - // empty if no tags reference the image, in which case the image is - // "untagged", in which case it can still be referenced by its ID. - RepoTags []string - - // RepoDigests is a list of content-addressable digests of locally available - // image manifests that the image is referenced from. Multiple manifests can - // refer to the same image. - // - // These digests are usually only available if the image was either pulled - // from a registry, or if the image was pushed to a registry, which is when - // the manifest is generated and its digest calculated. - RepoDigests []string - - // Parent is the ID of the parent image. - // - // Depending on how the image was created, this field may be empty and - // is only set for images that were built/created locally. This field - // is empty if the image was pulled from an image registry. - Parent string - - // Comment is an optional message that can be set when committing or - // importing the image. - Comment string - - // Created is the date and time at which the image was created, formatted in - // RFC 3339 nano-seconds (time.RFC3339Nano). - // - // This information is only available if present in the image, - // and omitted otherwise. - Created string `json:",omitempty"` - - // Container is the ID of the container that was used to create the image. - // - // Depending on how the image was created, this field may be empty. 
- // - // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. - Container string `json:",omitempty"` - - // ContainerConfig is an optional field containing the configuration of the - // container that was last committed when creating the image. - // - // Previous versions of Docker builder used this field to store build cache, - // and it is not in active use anymore. - // - // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. - ContainerConfig *container.Config `json:",omitempty"` - - // DockerVersion is the version of Docker that was used to build the image. - // - // Depending on how the image was created, this field may be empty. - DockerVersion string - - // Author is the name of the author that was specified when committing the - // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. - Author string - Config *container.Config - - // Architecture is the hardware CPU architecture that the image runs on. - Architecture string - - // Variant is the CPU architecture variant (presently ARM-only). - Variant string `json:",omitempty"` - - // OS is the Operating System the image is built to run on. - Os string - - // OsVersion is the version of the Operating System the image is built to - // run on (especially for Windows). - OsVersion string `json:",omitempty"` - - // Size is the total size of the image including all layers it is composed of. - Size int64 - - // VirtualSize is the total size of the image including all layers it is - // composed of. - // - // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - VirtualSize int64 `json:"VirtualSize,omitempty"` - - // GraphDriver holds information about the storage driver used to store the - // container's and image's filesystem. - GraphDriver GraphDriverData - - // RootFS contains information about the image's RootFS, including the - // layer IDs. - RootFS RootFS - - // Metadata of the image in the local cache. - // - // This information is local to the daemon, and not part of the image itself. 
- Metadata image.Metadata -} - -// Container contains response of Engine API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - Annotations map[string]string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - // Ping contains response of Engine API: // GET "/_ping" type Ping struct { @@ -205,176 +63,6 @@ type Version struct { BuildTime string `json:",omitempty"` } -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe - Output string // Output from last check -} - -// Health states -const ( - NoHealthcheck = "none" // Indicates there is no healthcheck - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string - Health *Health `json:",omitempty"` -} - -// ContainerJSONBase contains response of Engine API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` // Deprecated: Node was only propagated by Docker Swarm standalone API. It sill be removed in the next release. 
- Name string - RestartCount int - Driver string - Platform string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds networking state for a container when inspecting it. -type NetworkSettingsBase struct { - Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag. - SandboxID string // SandboxID uniquely represents a container's network stack - SandboxKey string // SandboxKey identifies the sandbox - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - - // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - // - // Deprecated: This field is never set and will be removed in a future release. - HairpinMode bool - // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - // - // Deprecated: This field is never set and will be removed in a future release. - LinkLocalIPv6Address string - // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - // - // Deprecated: This field is never set and will be removed in a future release. - LinkLocalIPv6PrefixLen int - SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release. - SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release. -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container. -// This is used for reporting the mountpoints in use by a container. -type MountPoint struct { - // Type is the type of mount, see `Type` definitions in - // github.com/docker/docker/api/types/mount.Type - Type mount.Type `json:",omitempty"` - - // Name is the name reference to the underlying data defined by `Source` - // e.g., the volume name. - Name string `json:",omitempty"` - - // Source is the source location of the mount. 
- // - // For volumes, this contains the storage location of the volume (within - // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains - // the source (host) part of the bind-mount. For `tmpfs` mount points, this - // field is empty. - Source string - - // Destination is the path relative to the container root (`/`) where the - // Source is mounted inside the container. - Destination string - - // Driver is the volume driver used to create the volume (if it is a volume). - Driver string `json:",omitempty"` - - // Mode is a comma separated list of options supplied by the user when - // creating the bind/volume mount. - // - // The default is platform-specific (`"z"` on Linux, empty on Windows). - Mode string - - // RW indicates whether the mount is mounted writable (read-write). - RW bool - - // Propagation describes how mounts are propagated from the host into the - // mount point, and vice-versa. Refer to the Linux kernel documentation - // for details: - // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt - // - // This field is not used on Windows. - Propagation mount.Propagation -} - // DiskUsageObject represents an object type used for disk usage query filtering. type DiskUsageObject string @@ -401,7 +89,7 @@ type DiskUsageOptions struct { type DiskUsage struct { LayersSize int64 Images []*image.Summary - Containers []*Container + Containers []*container.Summary Volumes []*volume.Volume BuildCache []*BuildCache BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. @@ -481,7 +169,11 @@ type BuildCache struct { // BuildCachePruneOptions hold parameters to prune the build cache type BuildCachePruneOptions struct { - All bool - KeepStorage int64 - Filters filters.Args + All bool + ReservedSpace int64 + MaxUsedSpace int64 + MinFreeSpace int64 + Filters filters.Args + + KeepStorage int64 // Deprecated: deprecated in API 1.48. } diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go index 43ffe104aa..93e4336adc 100644 --- a/vendor/github.com/docker/docker/api/types/types_deprecated.go +++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go @@ -1,210 +1,115 @@ package types import ( + "context" + + "github.com/docker/docker/api/types/common" "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/api/types/storage" ) -// ImagesPruneReport contains the response for Engine API: -// POST "/images/prune" -// -// Deprecated: use [image.PruneReport]. -type ImagesPruneReport = image.PruneReport - -// VolumesPruneReport contains the response for Engine API: -// POST "/volumes/prune". -// -// Deprecated: use [volume.PruneReport]. -type VolumesPruneReport = volume.PruneReport - -// NetworkCreateRequest is the request message sent to the server for network create call. -// -// Deprecated: use [network.CreateRequest]. -type NetworkCreateRequest = network.CreateRequest - -// NetworkCreate is the expected body of the "create network" http request message -// -// Deprecated: use [network.CreateOptions]. -type NetworkCreate = network.CreateOptions - -// NetworkListOptions holds parameters to filter the list of networks with. 
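The reworked BuildCachePruneOptions above replaces the single KeepStorage knob with three explicit thresholds (ReservedSpace, MaxUsedSpace, MinFreeSpace), keeping KeepStorage only as a deprecated fallback (API 1.48). A minimal caller-side sketch of the new fields; the sizes and the "until" filter value are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Keep at least 1 GiB of cache, cap total cache at 10 GiB, and try to
	// preserve 5 GiB of free disk space (all values illustrative).
	report, err := cli.BuildCachePrune(context.Background(), types.BuildCachePruneOptions{
		ReservedSpace: 1 << 30, // replaces the deprecated KeepStorage
		MaxUsedSpace:  10 << 30,
		MinFreeSpace:  5 << 30,
		Filters:       filters.NewArgs(filters.Arg("until", "24h")),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("reclaimed %d bytes from %d cache records\n", report.SpaceReclaimed, len(report.CachesDeleted))
}
```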
-// -// Deprecated: use [network.ListOptions]. -type NetworkListOptions = network.ListOptions - -// NetworkCreateResponse is the response message sent by the server for network create call. -// -// Deprecated: use [network.CreateResponse]. -type NetworkCreateResponse = network.CreateResponse - -// NetworkInspectOptions holds parameters to inspect network. -// -// Deprecated: use [network.InspectOptions]. -type NetworkInspectOptions = network.InspectOptions - -// NetworkConnect represents the data to be used to connect a container to the network -// -// Deprecated: use [network.ConnectOptions]. -type NetworkConnect = network.ConnectOptions - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -// -// Deprecated: use [network.DisconnectOptions]. -type NetworkDisconnect = network.DisconnectOptions - -// EndpointResource contains network resources allocated and used for a container in a network. +// IDResponse Response to an API call that returns just an Id. // -// Deprecated: use [network.EndpointResource]. -type EndpointResource = network.EndpointResource +// Deprecated: use either [container.CommitResponse] or [container.ExecCreateResponse]. It will be removed in the next release. +type IDResponse = common.IDResponse -// NetworkResource is the body of the "get network" http response message/ +// ContainerJSONBase contains response of Engine API GET "/containers/{name:.*}/json" +// for API version 1.18 and older. // -// Deprecated: use [network.Inspect] or [network.Summary] (for list operations). -type NetworkResource = network.Inspect +// Deprecated: use [container.InspectResponse] or [container.ContainerJSONBase]. It will be removed in the next release. +type ContainerJSONBase = container.ContainerJSONBase -// NetworksPruneReport contains the response for Engine API: -// POST "/networks/prune" +// ContainerJSON is the response for the GET "/containers/{name:.*}/json" +// endpoint. // -// Deprecated: use [network.PruneReport]. -type NetworksPruneReport = network.PruneReport +// Deprecated: use [container.InspectResponse]. It will be removed in the next release. +type ContainerJSON = container.InspectResponse -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. +// Container contains response of Engine API: +// GET "/containers/json" // -// Deprecated: use [container.ExecOptions]. -type ExecConfig = container.ExecOptions +// Deprecated: use [container.Summary]. +type Container = container.Summary -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package +// ContainerState stores container's running state // -// Deprecated: use [container.ExecStartOptions] or [container.ExecAttachOptions]. -type ExecStartCheck = container.ExecStartOptions +// Deprecated: use [container.State]. +type ContainerState = container.State -// ContainerExecInspect holds information returned by exec inspect. +// NetworkSettings exposes the network settings in the api. // -// Deprecated: use [container.ExecInspect]. -type ContainerExecInspect = container.ExecInspect +// Deprecated: use [container.NetworkSettings]. +type NetworkSettings = container.NetworkSettings -// ContainersPruneReport contains the response for Engine API: -// POST "/containers/prune" +// NetworkSettingsBase holds networking state for a container when inspecting it. // -// Deprecated: use [container.PruneReport]. 
-type ContainersPruneReport = container.PruneReport +// Deprecated: use [container.NetworkSettingsBase]. +type NetworkSettingsBase = container.NetworkSettingsBase -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. // -// Deprecated: use [container.PathStat]. -type ContainerPathStat = container.PathStat +// Deprecated: use [container.DefaultNetworkSettings]. +type DefaultNetworkSettings = container.DefaultNetworkSettings -// CopyToContainerOptions holds information -// about files to copy into a container. +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json. // -// Deprecated: use [container.CopyToContainerOptions], -type CopyToContainerOptions = container.CopyToContainerOptions +// Deprecated: use [container.NetworkSettingsSummary]. +type SummaryNetworkSettings = container.NetworkSettingsSummary -// ContainerStats contains response of Engine API: -// GET "/stats" -// -// Deprecated: use [container.StatsResponseReader]. -type ContainerStats = container.StatsResponseReader - -// ThrottlingData stores CPU throttling stats of one running container. -// Not used on Windows. -// -// Deprecated: use [container.ThrottlingData]. -type ThrottlingData = container.ThrottlingData - -// CPUUsage stores All CPU stats aggregated since container inception. -// -// Deprecated: use [container.CPUUsage]. -type CPUUsage = container.CPUUsage - -// CPUStats aggregates and wraps all CPU related info of container -// -// Deprecated: use [container.CPUStats]. -type CPUStats = container.CPUStats - -// MemoryStats aggregates all memory stats since container inception on Linux. -// Windows returns stats for commit and private working set only. -// -// Deprecated: use [container.MemoryStats]. -type MemoryStats = container.MemoryStats - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// Not used on Windows. -// -// Deprecated: use [container.BlkioStatEntry]. -type BlkioStatEntry = container.BlkioStatEntry - -// BlkioStats stores All IO service stats for data read and write. -// This is a Linux specific structure as the differences between expressing -// block I/O on Windows and Linux are sufficiently significant to make -// little sense attempting to morph into a combined structure. -// -// Deprecated: use [container.BlkioStats]. -type BlkioStats = container.BlkioStats - -// StorageStats is the disk I/O stats for read/write on Windows. -// -// Deprecated: use [container.StorageStats]. -type StorageStats = container.StorageStats - -// NetworkStats aggregates the network stats of one container -// -// Deprecated: use [container.NetworkStats]. -type NetworkStats = container.NetworkStats +// Health states +const ( + NoHealthcheck = container.NoHealthcheck // Deprecated: use [container.NoHealthcheck]. + Starting = container.Starting // Deprecated: use [container.Starting]. + Healthy = container.Healthy // Deprecated: use [container.Healthy]. + Unhealthy = container.Unhealthy // Deprecated: use [container.Unhealthy]. +) -// PidsStats contains the stats of a container's pids +// Health stores information about the container's healthcheck results. // -// Deprecated: use [container.PidsStats]. -type PidsStats = container.PidsStats +// Deprecated: use [container.Health]. 
+type Health = container.Health -// Stats is Ultimate struct aggregating all types of stats of one container +// HealthcheckResult stores information about a single run of a healthcheck probe. // -// Deprecated: use [container.Stats]. -type Stats = container.Stats +// Deprecated: use [container.HealthcheckResult]. +type HealthcheckResult = container.HealthcheckResult -// StatsJSON is newly used Networks +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. // -// Deprecated: use [container.StatsResponse]. -type StatsJSON = container.StatsResponse +// Deprecated: use [container.MountPoint]. +type MountPoint = container.MountPoint -// EventsOptions holds parameters to filter events with. +// Port An open port on a container // -// Deprecated: use [events.ListOptions]. -type EventsOptions = events.ListOptions +// Deprecated: use [container.Port]. +type Port = container.Port -// ImageSearchOptions holds parameters to search images with. +// GraphDriverData Information about the storage driver used to store the container's and +// image's filesystem. // -// Deprecated: use [registry.SearchOptions]. -type ImageSearchOptions = registry.SearchOptions +// Deprecated: use [storage.DriverData]. +type GraphDriverData = storage.DriverData -// ImageImportSource holds source information for ImageImport +// RootFS returns Image's RootFS description including the layer IDs. // -// Deprecated: use [image.ImportSource]. -type ImageImportSource image.ImportSource +// Deprecated: use [image.RootFS]. +type RootFS = image.RootFS -// ImageLoadResponse returns information to the client about a load process. +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" // -// Deprecated: use [image.LoadResponse]. -type ImageLoadResponse = image.LoadResponse +// Deprecated: use [image.InspectResponse]. +type ImageInspect = image.InspectResponse -// ContainerNode stores information about the node that a container -// is running on. It's only used by the Docker Swarm standalone API. +// RequestPrivilegeFunc is a function interface that clients can supply to +// retry operations after getting an authorization error. +// This function returns the registry authentication header value in base64 +// format, or an error if the privilege request fails. // -// Deprecated: ContainerNode was used for the classic Docker Swarm standalone API. It will be removed in the next release. -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} +// Deprecated: moved to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. 
+type RequestPrivilegeFunc func(context.Context) (string, error) diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go index b76bf366bb..51a73cdb25 100644 --- a/vendor/github.com/docker/docker/client/build_cancel.go +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -10,7 +10,7 @@ func (cli *Client) BuildCancel(ctx context.Context, id string) error { query := url.Values{} query.Set("id", id) - serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) - ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + ensureReaderClosed(resp) return err } diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go index 1a830f4135..92b47d1838 100644 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -17,27 +17,38 @@ func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePru return nil, err } - report := types.BuildCachePruneReport{} - query := url.Values{} if opts.All { query.Set("all", "1") } - query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage))) + + if opts.KeepStorage != 0 { + query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage))) + } + if opts.ReservedSpace != 0 { + query.Set("reserved-space", strconv.Itoa(int(opts.ReservedSpace))) + } + if opts.MaxUsedSpace != 0 { + query.Set("max-used-space", strconv.Itoa(int(opts.MaxUsedSpace))) + } + if opts.MinFreeSpace != 0 { + query.Set("min-free-space", strconv.Itoa(int(opts.MinFreeSpace))) + } f, err := filters.ToJSON(opts.Filters) if err != nil { return nil, errors.Wrap(err, "prune could not marshal filters option") } query.Set("filters", f) - serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/build/prune", query, nil, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + report := types.BuildCachePruneReport{} + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { return nil, errors.Wrap(err, "error retrieving disk usage") } diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/checkpoint.go similarity index 76% rename from vendor/github.com/docker/docker/client/interface_experimental.go rename to vendor/github.com/docker/docker/client/checkpoint.go index c585c10459..f690f7c952 100644 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ b/vendor/github.com/docker/docker/client/checkpoint.go @@ -6,11 +6,11 @@ import ( "github.com/docker/docker/api/types/checkpoint" ) -type apiClientExperimental interface { - CheckpointAPIClient -} - -// CheckpointAPIClient defines API client methods for the checkpoints +// CheckpointAPIClient defines API client methods for the checkpoints. +// +// Experimental: checkpoint and restore is still an experimental feature, +// and only available if the daemon is running with experimental features +// enabled. 
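The rewritten types_deprecated.go spells out the migration path for each legacy alias: everything now points at its canonical home in the container, image, storage, or common packages. A sketch of what that migration looks like from calling code, with the pre-migration types noted in comments (the list/inspect loop is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	ctx := context.Background()

	// Before: []types.Container — now: []container.Summary.
	summaries, err := cli.ContainerList(ctx, container.ListOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}

	for _, s := range summaries {
		// Before: types.ContainerJSON — now: container.InspectResponse.
		inspect, err := cli.ContainerInspect(ctx, s.ID)
		if err != nil {
			log.Fatal(err)
		}
		// Before: types.ContainerState — now: container.State.
		fmt.Println(s.ID[:12], inspect.State.Status)
	}
}
```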
type CheckpointAPIClient interface { CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error CheckpointDelete(ctx context.Context, container string, options checkpoint.DeleteOptions) error diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go index 9746d288df..7b06fee31d 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -7,8 +7,13 @@ import ( ) // CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error { - resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) +func (cli *Client) CheckpointCreate(ctx context.Context, containerID string, options checkpoint.CreateOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/checkpoints", nil, options, nil) ensureReaderClosed(resp) return err } diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go index b968c2b237..d15162ea04 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -9,6 +9,11 @@ import ( // CheckpointDelete deletes the checkpoint with the given name from the given container func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options checkpoint.DeleteOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} if options.CheckpointDir != "" { query.Set("dir", options.CheckpointDir) diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index 8feb1f3f7d..9e7963f0bb 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -23,6 +23,6 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options return checkpoints, err } - err = json.NewDecoder(resp.body).Decode(&checkpoints) + err = json.NewDecoder(resp.Body).Decode(&checkpoints) return checkpoints, err } diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 60d91bc65b..cd47f05eb2 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -2,7 +2,7 @@ Package client is a Go client for the Docker Engine API. For more information about the Engine API, see the documentation: -https://docs.docker.com/engine/api/ +https://docs.docker.com/reference/api/engine/ # Usage @@ -59,7 +59,6 @@ import ( "github.com/docker/go-connections/sockets" "github.com/pkg/errors" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - "go.opentelemetry.io/otel/trace" ) // DummyHost is a hostname used for local communication. @@ -99,6 +98,9 @@ const DummyHost = "api.moby.localhost" // recent version before negotiation was introduced. const fallbackAPIVersion = "1.24" +// Ensure that Client always implements APIClient. +var _ APIClient = &Client{} + // Client is the API client that performs all operations // against a docker server. 
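The new `var _ APIClient = &Client{}` assertion above is the standard Go compile-time guard: the build fails the moment *Client stops satisfying APIClient, instead of the mismatch surfacing in downstream importers. The idiom in isolation, with made-up Greeter/LoudGreeter names:

```go
package main

import "fmt"

// Greeter is the interface we want to keep implemented.
type Greeter interface {
	Greet(name string) string
}

// LoudGreeter is the concrete implementation.
type LoudGreeter struct{}

func (LoudGreeter) Greet(name string) string { return "HELLO, " + name + "!" }

// Compile-time guard: if LoudGreeter drifts from Greeter (a method is
// renamed or a signature changes), this line stops compiling immediately.
var _ Greeter = LoudGreeter{}

func main() {
	fmt.Println(LoudGreeter{}.Greet("world"))
}
```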
type Client struct { @@ -138,7 +140,7 @@ type Client struct { // negotiateLock is used to single-flight the version negotiation process negotiateLock sync.Mutex - tp trace.TracerProvider + traceOpts []otelhttp.Option // When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections). // Store the original transport as the http.Client transport will be wrapped with tracing libs. @@ -200,6 +202,12 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) { client: client, proto: hostURL.Scheme, addr: hostURL.Host, + + traceOpts: []otelhttp.Option{ + otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { + return req.Method + " " + req.URL.Path + }), + }, } for _, op := range ops { @@ -227,13 +235,7 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) { } } - c.client.Transport = otelhttp.NewTransport( - c.client.Transport, - otelhttp.WithTracerProvider(c.tp), - otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { - return req.Method + " " + req.URL.Path - }), - ) + c.client.Transport = otelhttp.NewTransport(c.client.Transport, c.traceOpts...) return c, nil } @@ -247,6 +249,14 @@ func (cli *Client) tlsConfig() *tls.Config { func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { transport := &http.Transport{} + // Necessary to prevent long-lived processes using the + // client from leaking connections due to idle connections + // not being released. + // TODO: see if we can also address this from the server side, + // or in go-connections. + // see: https://github.com/moby/moby/issues/45539 + transport.MaxIdleConns = 6 + transport.IdleConnTimeout = 30 * time.Second err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) if err != nil { return nil, err @@ -296,8 +306,7 @@ func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) s var apiPath string _ = cli.checkVersion(ctx) if cli.version != "" { - v := strings.TrimPrefix(cli.version, "v") - apiPath = path.Join(cli.basePath, "/v"+v, p) + apiPath = path.Join(cli.basePath, "/v"+strings.TrimPrefix(cli.version, "v"), p) } else { apiPath = path.Join(cli.basePath, p) } @@ -442,6 +451,10 @@ func (cli *Client) dialerFromTransport() func(context.Context, string, string) ( // // ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014 func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return cli.dialer() +} + +func (cli *Client) dialer() func(context.Context) (net.Conn, error) { return func(ctx context.Context) (net.Conn, error) { if dialFn := cli.dialerFromTransport(); dialFn != nil { return dialFn(ctx, cli.proto, cli.addr) diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/client_interfaces.go similarity index 86% rename from vendor/github.com/docker/docker/client/interface.go rename to vendor/github.com/docker/docker/client/client_interfaces.go index cc60a5d13b..f70d8ffa01 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/client_interfaces.go @@ -20,17 +20,23 @@ import ( ) // CommonAPIClient is the common methods between stable and experimental versions of APIClient. -type CommonAPIClient interface { +// +// Deprecated: use [APIClient] instead. This type will be an alias for [APIClient] in the next release, and removed after. +type CommonAPIClient = stableAPIClient + +// APIClient is an interface that clients that talk with a docker server must implement. 
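The tracing change above replaces the stored trace.TracerProvider field with a traceOpts slice that is applied when the HTTP transport is wrapped; the span-name formatter itself is unchanged. The same otelhttp pattern in isolation, so the resulting span names read "GET /v1.47/containers/json" rather than a generic operation name (the target URL is illustrative):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Wrap the base transport the same way the client now does internally.
	httpClient := &http.Client{
		Transport: otelhttp.NewTransport(
			http.DefaultTransport,
			otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string {
				return req.Method + " " + req.URL.Path
			}),
		),
	}

	resp, err := httpClient.Get("https://example.com/") // illustrative endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```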
+type APIClient interface { + stableAPIClient + CheckpointAPIClient // CheckpointAPIClient is still experimental. +} + +type stableAPIClient interface { ConfigAPIClient ContainerAPIClient DistributionAPIClient ImageAPIClient - NodeAPIClient NetworkAPIClient PluginAPIClient - ServiceAPIClient - SwarmAPIClient - SecretAPIClient SystemAPIClient VolumeAPIClient ClientVersion() string @@ -39,27 +45,43 @@ type CommonAPIClient interface { ServerVersion(ctx context.Context) (types.Version, error) NegotiateAPIVersion(ctx context.Context) NegotiateAPIVersionPing(types.Ping) - DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) + HijackDialer Dialer() func(context.Context) (net.Conn, error) Close() error + SwarmManagementAPIClient +} + +// SwarmManagementAPIClient defines all methods for managing Swarm-specific +// objects. +type SwarmManagementAPIClient interface { + SwarmAPIClient + NodeAPIClient + ServiceAPIClient + SecretAPIClient + ConfigAPIClient +} + +// HijackDialer defines methods for a hijack dialer. +type HijackDialer interface { + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) } // ContainerAPIClient defines API client methods for the containers type ContainerAPIClient interface { ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) + ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (container.CommitResponse, error) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error) ContainerExecAttach(ctx context.Context, execID string, options container.ExecAttachOptions) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (types.IDResponse, error) + ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (container.ExecCreateResponse, error) ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error ContainerExecStart(ctx context.Context, execID string, options container.ExecStartOptions) error ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerInspect(ctx context.Context, container string) (container.InspectResponse, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (container.InspectResponse, []byte, error) ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) + ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error) ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) ContainerPause(ctx context.Context, container string) error 
ContainerRemove(ctx context.Context, container string, options container.RemoveOptions) error @@ -71,9 +93,9 @@ type ContainerAPIClient interface { ContainerStatsOneShot(ctx context.Context, container string) (container.StatsResponseReader, error) ContainerStart(ctx context.Context, container string, options container.StartOptions) error ContainerStop(ctx context.Context, container string, options container.StopOptions) error - ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) + ContainerTop(ctx context.Context, container string, arguments []string) (container.TopResponse, error) ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.UpdateResponse, error) ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, container.PathStat, error) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options container.CopyToContainerOptions) error @@ -91,18 +113,30 @@ type ImageAPIClient interface { BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) BuildCancel(ctx context.Context, id string) error ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) - ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) - ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error) ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error) ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error) ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error) ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error) - ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) ImageTag(ctx context.Context, image, ref string) error ImagesPrune(ctx context.Context, pruneFilter filters.Args) (image.PruneReport, error) + + ImageInspect(ctx context.Context, image string, _ ...ImageInspectOption) (image.InspectResponse, error) + ImageHistory(ctx context.Context, image string, _ ...ImageHistoryOption) ([]image.HistoryResponseItem, error) + ImageLoad(ctx context.Context, input io.Reader, _ ...ImageLoadOption) (image.LoadResponse, error) + ImageSave(ctx context.Context, images []string, _ ...ImageSaveOption) (io.ReadCloser, error) + + ImageAPIClientDeprecated +} + +// ImageAPIClientDeprecated defines deprecated methods of the ImageAPIClient. +type ImageAPIClientDeprecated interface { + // ImageInspectWithRaw returns the image information and its raw representation. + // + // Deprecated: Use [Client.ImageInspect] instead. Raw response can be obtained using the [ImageInspectWithRawResponse] option. 
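The interface split above (stableAPIClient, SwarmManagementAPIClient, HijackDialer, plus the ImageAPIClientDeprecated bucket) lets consumers depend on exactly the surface they use, which keeps fakes small in tests. A sketch of consuming the narrower Swarm-management interface; listServices is a hypothetical helper:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// listServices depends only on the Swarm-management surface, so it can be
// unit-tested with a small fake instead of a full APIClient.
func listServices(ctx context.Context, c client.SwarmManagementAPIClient) error {
	services, err := c.ServiceList(ctx, types.ServiceListOptions{})
	if err != nil {
		return err
	}
	for _, s := range services {
		fmt.Println(s.ID, s.Spec.Name)
	}
	return nil
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	if err := listServices(context.Background(), cli); err != nil {
		log.Fatal(err)
	}
}
```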
+	ImageInspectWithRaw(ctx context.Context, image string) (image.InspectResponse, []byte, error)
 }
 
 // NetworkAPIClient defines API client methods for the networks
diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go
index 3deb4a8e2a..c7ea6d2eba 100644
--- a/vendor/github.com/docker/docker/client/config_create.go
+++ b/vendor/github.com/docker/docker/client/config_create.go
@@ -20,6 +20,6 @@ func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (t
 		return response, err
 	}
 
-	err = json.NewDecoder(resp.body).Decode(&response)
+	err = json.NewDecoder(resp.Body).Decode(&response)
 	return response, err
 }
diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go
index 2c6c7cb36f..679a42c762 100644
--- a/vendor/github.com/docker/docker/client/config_inspect.go
+++ b/vendor/github.com/docker/docker/client/config_inspect.go
@@ -11,8 +11,9 @@ import (
 
 // ConfigInspectWithRaw returns the config information with raw data
 func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) {
-	if id == "" {
-		return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id}
+	id, err := trimID("config", id)
+	if err != nil {
+		return swarm.Config{}, nil, err
 	}
 	if err := cli.NewVersionError(ctx, "1.30", "config inspect"); err != nil {
 		return swarm.Config{}, nil, err
@@ -23,7 +24,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C
 		return swarm.Config{}, nil, err
 	}
 
-	body, err := io.ReadAll(resp.body)
+	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return swarm.Config{}, nil, err
 	}
diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go
index 14dd3813e3..7e4a8ea567 100644
--- a/vendor/github.com/docker/docker/client/config_list.go
+++ b/vendor/github.com/docker/docker/client/config_list.go
@@ -33,6 +33,6 @@ func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptio
 	}
 
 	var configs []swarm.Config
-	err = json.NewDecoder(resp.body).Decode(&configs)
+	err = json.NewDecoder(resp.Body).Decode(&configs)
 	return configs, err
 }
diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go
index d05b0113aa..a2955c6894 100644
--- a/vendor/github.com/docker/docker/client/config_remove.go
+++ b/vendor/github.com/docker/docker/client/config_remove.go
@@ -4,6 +4,10 @@ import "context"
 
 // ConfigRemove removes a config.
func (cli *Client) ConfigRemove(ctx context.Context, id string) error { + id, err := trimID("config", id) + if err != nil { + return err + } if err := cli.NewVersionError(ctx, "1.30", "config remove"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go index 6995861df0..ddb219cf6a 100644 --- a/vendor/github.com/docker/docker/client/config_update.go +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -9,6 +9,10 @@ import ( // ConfigUpdate attempts to update a config func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { + id, err := trimID("config", id) + if err != nil { + return err + } if err := cli.NewVersionError(ctx, "1.30", "config update"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index 6a32e5f664..2e7a13e5c5 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -33,7 +33,12 @@ import ( // // You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this // stream. -func (cli *Client) ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) { +func (cli *Client) ContainerAttach(ctx context.Context, containerID string, options container.AttachOptions) (types.HijackedResponse, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return types.HijackedResponse{}, err + } + query := url.Values{} if options.Stream { query.Set("stream", "1") @@ -54,7 +59,7 @@ func (cli *Client) ContainerAttach(ctx context.Context, container string, option query.Set("logs", "1") } - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, http.Header{ + return cli.postHijacked(ctx, "/containers/"+containerID+"/attach", query, nil, http.Header{ "Content-Type": {"text/plain"}, }) } diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index 26b3f09158..9b46a1f327 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -7,21 +7,25 @@ import ( "net/url" "github.com/distribution/reference" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" ) // ContainerCommit applies changes to a container and creates a new tagged image. 
-func (cli *Client) ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) { +func (cli *Client) ContainerCommit(ctx context.Context, containerID string, options container.CommitOptions) (container.CommitResponse, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.CommitResponse{}, err + } + var repository, tag string if options.Reference != "" { ref, err := reference.ParseNormalizedNamed(options.Reference) if err != nil { - return types.IDResponse{}, err + return container.CommitResponse{}, err } if _, isCanonical := ref.(reference.Canonical); isCanonical { - return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + return container.CommitResponse{}, errors.New("refusing to create a tag with a digest reference") } ref = reference.TagNameOnly(ref) @@ -32,7 +36,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option } query := url.Values{} - query.Set("container", container) + query.Set("container", containerID) query.Set("repo", repository) query.Set("tag", tag) query.Set("comment", options.Comment) @@ -44,13 +48,13 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option query.Set("pause", "0") } - var response types.IDResponse + var response container.CommitResponse resp, err := cli.post(ctx, "/commit", query, options.Config, nil) defer ensureReaderClosed(resp) if err != nil { return response, err } - err = json.NewDecoder(resp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index 8490a3b156..39584d375f 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -16,21 +16,30 @@ import ( // ContainerStatPath returns stat information about a path inside the container filesystem. func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (container.PathStat, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.PathStat{}, err + } + query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. - urlStr := "/containers/" + containerID + "/archive" - response, err := cli.head(ctx, urlStr, query, nil) - defer ensureReaderClosed(response) + resp, err := cli.head(ctx, "/containers/"+containerID+"/archive", query, nil) + defer ensureReaderClosed(resp) if err != nil { return container.PathStat{}, err } - return getContainerPathStatFromHeader(response.header) + return getContainerPathStatFromHeader(resp.Header) } // CopyToContainer copies content into the container filesystem. // Note that `content` must be a Reader for a TAR archive func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options container.CopyToContainerOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. 
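ContainerCommit above now returns container.CommitResponse rather than types.IDResponse; both carry the same ID field, so callers typically only swap the import. A usage sketch with illustrative names:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Commit the container's current filesystem as a new tagged image.
	// Before this change the result was types.IDResponse; it is now
	// container.CommitResponse, with the same ID field.
	resp, err := cli.ContainerCommit(context.Background(), "my-container", container.CommitOptions{
		Reference: "example/snapshot:latest", // must not contain a digest
		Comment:   "editor example",
		Pause:     true,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created image:", resp.ID)
}
```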
@@ -42,9 +51,7 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str query.Set("copyUIDGID", "true") } - apiPath := "/containers/" + containerID + "/archive" - - response, err := cli.putRaw(ctx, apiPath, query, content, nil) + response, err := cli.putRaw(ctx, "/containers/"+containerID+"/archive", query, content, nil) defer ensureReaderClosed(response) if err != nil { return err @@ -56,11 +63,15 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str // CopyFromContainer gets the content from the container and returns it as a Reader // for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, container.PathStat, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return nil, container.PathStat{}, err + } + query := make(url.Values, 1) query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - apiPath := "/containers/" + containerID + "/archive" - response, err := cli.get(ctx, apiPath, query, nil) + resp, err := cli.get(ctx, "/containers/"+containerID+"/archive", query, nil) if err != nil { return nil, container.PathStat{}, err } @@ -71,11 +82,11 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s // copy it locally. Along with the stat info about the local destination, // we have everything we need to handle the multiple possibilities there // can be when copying a file/dir from one location to another file/dir. - stat, err := getContainerPathStatFromHeader(response.header) + stat, err := getContainerPathStatFromHeader(resp.Header) if err != nil { return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) } - return response.body, stat, err + return resp.Body, stat, err } func getContainerPathStatFromHeader(header http.Header) (container.PathStat, error) { diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index 5442d4267d..9bb106f776 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -3,8 +3,11 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "errors" "net/url" "path" + "sort" + "strings" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" @@ -12,12 +15,6 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) -type configWrapper struct { - *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - // ContainerCreate creates a new container based on the given configuration. // It can be associated with a name, but it's not mandatory. 
func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) { @@ -58,6 +55,22 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize hostConfig.ConsoleSize = [2]uint{0, 0} } + if versions.LessThan(cli.ClientVersion(), "1.44") { + for _, m := range hostConfig.Mounts { + if m.BindOptions != nil { + // ReadOnlyNonRecursive can be safely ignored when API < 1.44 + if m.BindOptions.ReadOnlyForceRecursive { + return response, errors.New("bind-recursive=readonly requires API v1.44 or later") + } + if m.BindOptions.NonRecursive && versions.LessThan(cli.ClientVersion(), "1.40") { + return response, errors.New("bind-recursive=disabled requires API v1.40 or later") + } + } + } + } + + hostConfig.CapAdd = normalizeCapabilities(hostConfig.CapAdd) + hostConfig.CapDrop = normalizeCapabilities(hostConfig.CapDrop) } // Since API 1.44, the container-wide MacAddress is deprecated and will trigger a WARNING if it's specified. @@ -74,19 +87,19 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config query.Set("name", containerName) } - body := configWrapper{ + body := container.CreateRequest{ Config: config, HostConfig: hostConfig, NetworkingConfig: networkingConfig, } - serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } - err = json.NewDecoder(serverResp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } @@ -114,3 +127,42 @@ func hasEndpointSpecificMacAddress(networkingConfig *network.NetworkingConfig) b } return false } + +// allCapabilities is a magic value for "all capabilities" +const allCapabilities = "ALL" + +// normalizeCapabilities normalizes capabilities to their canonical form, +// removes duplicates, and sorts the results. +// +// It is similar to [github.com/docker/docker/oci/caps.NormalizeLegacyCapabilities], +// but performs no validation based on supported capabilities. +func normalizeCapabilities(caps []string) []string { + var normalized []string + + unique := make(map[string]struct{}) + for _, c := range caps { + c = normalizeCap(c) + if _, ok := unique[c]; ok { + continue + } + unique[c] = struct{}{} + normalized = append(normalized, c) + } + + sort.Strings(normalized) + return normalized +} + +// normalizeCap normalizes a capability to its canonical format by upper-casing +// and adding a "CAP_" prefix (if not yet present). It also accepts the "ALL" +// magic-value. +func normalizeCap(cap string) string { + cap = strings.ToUpper(cap) + if cap == allCapabilities { + return cap + } + if !strings.HasPrefix(cap, "CAP_") { + cap = "CAP_" + cap + } + return cap +} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go index c22c819a79..52401898bd 100644 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -10,14 +10,21 @@ import ( // ContainerDiff shows differences in a container filesystem since it was started. 
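To make the rules of the new normalizeCapabilities helper above concrete: upper-case, add a CAP_ prefix unless the value is the ALL magic word, drop duplicates, sort. Since the helper is unexported, this sketch duplicates its logic for illustration:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// normalize mirrors the client's unexported normalizeCapabilities helper:
// upper-case, prefix with "CAP_" (except the magic "ALL" value), dedupe, sort.
func normalize(caps []string) []string {
	var out []string
	seen := make(map[string]struct{})
	for _, c := range caps {
		c = strings.ToUpper(c)
		if c != "ALL" && !strings.HasPrefix(c, "CAP_") {
			c = "CAP_" + c
		}
		if _, ok := seen[c]; ok {
			continue
		}
		seen[c] = struct{}{}
		out = append(out, c)
	}
	sort.Strings(out)
	return out
}

func main() {
	// Mixed case, duplicates, and a pre-prefixed entry all collapse to a
	// canonical sorted list: [ALL CAP_NET_ADMIN CAP_SYS_ADMIN]
	fmt.Println(normalize([]string{"sys_admin", "CAP_NET_ADMIN", "net_admin", "all"}))
}
```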
func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.FilesystemChange, error) { - var changes []container.FilesystemChange + containerID, err := trimID("container", containerID) + if err != nil { + return nil, err + } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + defer ensureReaderClosed(resp) if err != nil { - return changes, err + return nil, err } - err = json.NewDecoder(serverResp.body).Decode(&changes) + var changes []container.FilesystemChange + err = json.NewDecoder(resp.Body).Decode(&changes) + if err != nil { + return nil, err + } return changes, err } diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index 9379448d1a..a39ec71790 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -11,8 +11,11 @@ import ( ) // ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (types.IDResponse, error) { - var response types.IDResponse +func (cli *Client) ContainerExecCreate(ctx context.Context, containerID string, options container.ExecOptions) (container.ExecCreateResponse, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.ExecCreateResponse{}, err + } // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. @@ -20,22 +23,24 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, op // Normally, version-negotiation (if enabled) would not happen until // the API request is made. if err := cli.checkVersion(ctx); err != nil { - return response, err + return container.ExecCreateResponse{}, err } if err := cli.NewVersionError(ctx, "1.25", "env"); len(options.Env) != 0 && err != nil { - return response, err + return container.ExecCreateResponse{}, err } if versions.LessThan(cli.ClientVersion(), "1.42") { options.ConsoleSize = nil } - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, options, nil) + resp, err := cli.post(ctx, "/containers/"+containerID+"/exec", nil, options, nil) defer ensureReaderClosed(resp) if err != nil { - return response, err + return container.ExecCreateResponse{}, err } - err = json.NewDecoder(resp.body).Decode(&response) + + var response container.ExecCreateResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } @@ -70,7 +75,7 @@ func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (con return response, err } - err = json.NewDecoder(resp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go index d0c0a5cbad..360d527630 100644 --- a/vendor/github.com/docker/docker/client/container_export.go +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -10,10 +10,15 @@ import ( // and returns them as an io.ReadCloser. It's up to the caller // to close the stream. 
func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + containerID, err := trimID("container", containerID) if err != nil { return nil, err } - return serverResp.body, nil + resp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go index d48f0d3a68..6000318607 100644 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -7,46 +7,50 @@ import ( "io" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerInspect returns the container information. -func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - if containerID == "" { - return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.InspectResponse{}, err } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - defer ensureReaderClosed(serverResp) + + resp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + defer ensureReaderClosed(resp) if err != nil { - return types.ContainerJSON{}, err + return container.InspectResponse{}, err } - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) + var response container.InspectResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } // ContainerInspectWithRaw returns the container information and its raw representation. 
-func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { - if containerID == "" { - return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} +func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (container.InspectResponse, []byte, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.InspectResponse{}, nil, err } + query := url.Values{} if getSize { query.Set("size", "1") } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + defer ensureReaderClosed(resp) if err != nil { - return types.ContainerJSON{}, nil, err + return container.InspectResponse{}, nil, err } - body, err := io.ReadAll(serverResp.body) + body, err := io.ReadAll(resp.Body) if err != nil { - return types.ContainerJSON{}, nil, err + return container.InspectResponse{}, nil, err } - var response types.ContainerJSON + var response container.InspectResponse rdr := bytes.NewReader(body) err = json.NewDecoder(rdr).Decode(&response) return response, body, err diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go index 7c9529f1e1..22767ae682 100644 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -7,6 +7,11 @@ import ( // ContainerKill terminates the container process but does not remove the container from the docker host. func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} if signal != "" { query.Set("signal", signal) diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index 782e1b3c62..510bcdf680 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -6,13 +6,12 @@ import ( "net/url" "strconv" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" ) // ContainerList returns the list of containers in the docker host. -func (cli *Client) ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) { +func (cli *Client) ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error) { query := url.Values{} if options.All { @@ -51,7 +50,7 @@ func (cli *Client) ContainerList(ctx context.Context, options container.ListOpti return nil, err } - var containers []types.Container - err = json.NewDecoder(resp.body).Decode(&containers) + var containers []container.Summary + err = json.NewDecoder(resp.Body).Decode(&containers) return containers, err } diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 61197d8407..ae30f8d10d 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -33,7 +33,12 @@ import ( // // You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this // stream. 
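The ContainerLogs doc comment above points at stdcopy.StdCopy for demultiplexing the non-TTY log stream; a minimal sketch of that pattern (the container name is illustrative):

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	rc, err := cli.ContainerLogs(context.Background(), "my-container", container.LogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Timestamps: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// For containers started without a TTY the stream is multiplexed;
	// StdCopy splits it back into stdout and stderr.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rc); err != nil {
		log.Fatal(err)
	}
}
```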
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) { +func (cli *Client) ContainerLogs(ctx context.Context, containerID string, options container.LogsOptions) (io.ReadCloser, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return nil, err + } + query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") @@ -72,9 +77,9 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options } query.Set("tail", options.Tail) - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + resp, err := cli.get(ctx, "/containers/"+containerID+"/logs", query, nil) if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go index 5e7271a371..5cc2984013 100644 --- a/vendor/github.com/docker/docker/client/container_pause.go +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -4,6 +4,11 @@ import "context" // ContainerPause pauses the main process of a given container without terminating it. func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go index 29c922da77..3176be5969 100644 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -11,25 +11,24 @@ import ( // ContainersPrune requests the daemon to delete unused data func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) { - var report container.PruneReport - if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil { - return report, err + return container.PruneReport{}, err } query, err := getFiltersQuery(pruneFilters) if err != nil { - return report, err + return container.PruneReport{}, err } - serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + defer ensureReaderClosed(resp) if err != nil { - return report, err + return container.PruneReport{}, err } - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) + var report container.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return container.PruneReport{}, fmt.Errorf("Error retrieving disk usage: %v", err) } return report, nil diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go index 39f7b106a1..6661351a92 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -9,6 +9,11 @@ import ( // ContainerRemove kills and removes a container from the docker host. 
func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} if options.RemoveVolumes { query.Set("v", "1") diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go index 240fdf552b..0a092310c6 100644 --- a/vendor/github.com/docker/docker/client/container_rename.go +++ b/vendor/github.com/docker/docker/client/container_rename.go @@ -7,6 +7,11 @@ import ( // ContainerRename changes the name of a given container. func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} query.Set("name", newContainerName) resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go index 5cfd01d479..725c08ad41 100644 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -10,18 +10,27 @@ import ( // ContainerResize changes the size of the tty for a container. func (cli *Client) ContainerResize(ctx context.Context, containerID string, options container.ResizeOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) } // ContainerExecResize changes the size of the tty for an exec process running inside a container. func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error { + execID, err := trimID("exec", execID) + if err != nil { + return err + } return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) } func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + // FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint. query := url.Values{} - query.Set("h", strconv.Itoa(int(height))) - query.Set("w", strconv.Itoa(int(width))) + query.Set("h", strconv.FormatUint(uint64(height), 10)) + query.Set("w", strconv.FormatUint(uint64(width), 10)) resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index 02b5079bc4..50559ba6e4 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -13,6 +13,11 @@ import ( // It makes the daemon wait for the container to be up again for // a specific amount of time, given the timeout. 
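[Illustrative sketch, not part of the patch: ContainerRestart takes container.StopOptions, whose Timeout is a *int as the "t" query handling in the hunk below shows; nil means the daemon default. The ID is hypothetical.]

// Restart with a 10-second stop timeout before the process is killed.
timeout := 10
err := cli.ContainerRestart(ctx, "my-container", container.StopOptions{
	Timeout: &timeout,
})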
func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} if options.Timeout != nil { query.Set("t", strconv.Itoa(*options.Timeout)) diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go index 33ba85f248..b81ed3ebc8 100644 --- a/vendor/github.com/docker/docker/client/container_start.go +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -9,6 +9,11 @@ import ( // ContainerStart sends a request to the docker daemon to start a container. func (cli *Client) ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} if len(options.CheckpointID) != 0 { query.Set("checkpoint", options.CheckpointID) diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index b5641daee9..a66b90cb28 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -10,6 +10,11 @@ import ( // ContainerStats returns near realtime stats for a given container. // It's up to the caller to close the io.ReadCloser returned. func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (container.StatsResponseReader, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.StatsResponseReader{}, err + } + query := url.Values{} query.Set("stream", "0") if stream { @@ -22,14 +27,19 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea } return container.StatsResponseReader{ - Body: resp.body, - OSType: getDockerOS(resp.header.Get("Server")), + Body: resp.Body, + OSType: getDockerOS(resp.Header.Get("Server")), }, nil } // ContainerStatsOneShot gets a single stat entry from a container. // It differs from `ContainerStats` in that the API should not wait to prime the stats func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (container.StatsResponseReader, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.StatsResponseReader{}, err + } + query := url.Values{} query.Set("stream", "0") query.Set("one-shot", "1") @@ -40,7 +50,7 @@ func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string } return container.StatsResponseReader{ - Body: resp.body, - OSType: getDockerOS(resp.header.Get("Server")), + Body: resp.Body, + OSType: getDockerOS(resp.Header.Get("Server")), }, nil } diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index 7c98a354b4..eb0129ce37 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -17,6 +17,11 @@ import ( // otherwise the engine default. A negative timeout value can be specified, // meaning no timeout, i.e. no forceful termination is performed. 
func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} if options.Timeout != nil { query.Set("t", strconv.Itoa(*options.Timeout)) diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go index a5b78999bf..12c8b78f6c 100644 --- a/vendor/github.com/docker/docker/client/container_top.go +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -10,8 +10,12 @@ import ( ) // ContainerTop shows process information from within a container. -func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { - var response container.ContainerTopOKBody +func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.TopResponse, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.TopResponse{}, err + } + query := url.Values{} if len(arguments) > 0 { query.Set("ps_args", strings.Join(arguments, " ")) @@ -20,9 +24,10 @@ func (cli *Client) ContainerTop(ctx context.Context, containerID string, argumen resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) defer ensureReaderClosed(resp) if err != nil { - return response, err + return container.TopResponse{}, err } - err = json.NewDecoder(resp.body).Decode(&response) + var response container.TopResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go index 1d8f873169..f602549bb2 100644 --- a/vendor/github.com/docker/docker/client/container_unpause.go +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -4,6 +4,11 @@ import "context" // ContainerUnpause resumes the process execution within a container func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go index bf68a5300e..7f0cf62760 100644 --- a/vendor/github.com/docker/docker/client/container_update.go +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -8,14 +8,19 @@ import ( ) // ContainerUpdate updates the resources of a container. 
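[Illustrative sketch, not part of the patch: ContainerUpdate, whose return type changes to container.UpdateResponse in the hunk below; its Warnings field carries non-fatal daemon messages. Values are hypothetical, and the "log" import is assumed.]

res, err := cli.ContainerUpdate(ctx, "my-container", container.UpdateConfig{
	Resources: container.Resources{
		Memory:     512 * 1024 * 1024, // 512 MiB
		MemorySwap: -1,                // unlimited swap, avoids limit conflicts
	},
})
if err != nil {
	return err
}
for _, w := range res.Warnings {
	log.Println("warning:", w)
}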
-func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { - var response container.ContainerUpdateOKBody - serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) - defer ensureReaderClosed(serverResp) +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.UpdateResponse, error) { + containerID, err := trimID("container", containerID) if err != nil { - return response, err + return container.UpdateResponse{}, err } - err = json.NewDecoder(serverResp.body).Decode(&response) + resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(resp) + if err != nil { + return container.UpdateResponse{}, err + } + + var response container.UpdateResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 8bb6be0a18..bda4a9eeee 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -33,6 +33,12 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit resultC := make(chan container.WaitResponse) errC := make(chan error, 1) + containerID, err := trimID("container", containerID) + if err != nil { + errC <- err + return resultC, errC + } + // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. // @@ -61,9 +67,8 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit go func() { defer ensureReaderClosed(resp) - body := resp.body responseText := bytes.NewBuffer(nil) - stream := io.TeeReader(body, responseText) + stream := io.TeeReader(resp.Body, responseText) var res container.WaitResponse if err := json.NewDecoder(stream).Decode(&res); err != nil { @@ -105,7 +110,7 @@ func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) defer ensureReaderClosed(resp) var res container.WaitResponse - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + if err := json.NewDecoder(resp.Body).Decode(&res); err != nil { errC <- err return } diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go index ba0d92e9e6..ed788125c0 100644 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -19,14 +19,14 @@ func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions } } - serverResp, err := cli.get(ctx, "/system/df", query, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/system/df", query, nil) + defer ensureReaderClosed(resp) if err != nil { return types.DiskUsage{}, err } var du types.DiskUsage - if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + if err := json.NewDecoder(resp.Body).Decode(&du); err != nil { return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err) } return du, nil diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go index 68e6ec5ed6..b8654b24f1 100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ 
b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -11,14 +11,12 @@ import ( // DistributionInspect returns the image digest with the full manifest. func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedRegistryAuth string) (registry.DistributionInspect, error) { - // Contact the registry to retrieve digest and platform information - var distributionInspect registry.DistributionInspect if imageRef == "" { - return distributionInspect, objectNotFoundError{object: "distribution", id: imageRef} + return registry.DistributionInspect{}, objectNotFoundError{object: "distribution", id: imageRef} } if err := cli.NewVersionError(ctx, "1.30", "distribution inspect"); err != nil { - return distributionInspect, err + return registry.DistributionInspect{}, err } var headers http.Header @@ -28,12 +26,14 @@ func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedReg } } + // Contact the registry to retrieve digest and platform information resp, err := cli.get(ctx, "/distribution/"+imageRef+"/json", url.Values{}, headers) defer ensureReaderClosed(resp) if err != nil { - return distributionInspect, err + return registry.DistributionInspect{}, err } - err = json.NewDecoder(resp.body).Decode(&distributionInspect) + var distributionInspect registry.DistributionInspect + err = json.NewDecoder(resp.Body).Decode(&distributionInspect) return distributionInspect, err } diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 0d01e243fe..609f92ce66 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -2,11 +2,11 @@ package client // import "github.com/docker/docker/client" import ( "context" + "errors" "fmt" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/errdefs" - "github.com/pkg/errors" ) // errConnectionFailed implements an error returned when connection failed. @@ -29,10 +29,18 @@ func IsErrConnectionFailed(err error) bool { } // ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. +// +// Deprecated: this function was only used internally, and will be removed in the next release. func ErrorConnectionFailed(host string) error { + return connectionFailed(host) +} + +// connectionFailed returns an error with host in the error message when connection +// to docker daemon failed. +func connectionFailed(host string) error { var err error if host == "" { - err = fmt.Errorf("Cannot connect to the Docker daemon. Is the docker daemon running on this host?") + err = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?") } else { err = fmt.Errorf("Cannot connect to the Docker daemon at %s. 
Is the docker daemon running?", host) } diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index d3ab26bed8..c71d2a088f 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -36,9 +36,9 @@ func (cli *Client) Events(ctx context.Context, options events.ListOptions) (<-ch errs <- err return } - defer resp.body.Close() + defer resp.Body.Close() - decoder := json.NewDecoder(resp.body) + decoder := json.NewDecoder(resp.Body) close(started) for { diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 839d4c5cd6..2c78fad002 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -25,12 +25,17 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu if err != nil { return types.HijackedResponse{}, err } - conn, mediaType, err := cli.setupHijackConn(req, "tcp") + conn, mediaType, err := setupHijackConn(cli.dialer(), req, "tcp") if err != nil { return types.HijackedResponse{}, err } - return types.NewHijackedResponse(conn, mediaType), err + if versions.LessThan(cli.ClientVersion(), "1.42") { + // Prior to 1.42, Content-Type is always set to raw-stream and not relevant + mediaType = "" + } + + return types.NewHijackedResponse(conn, mediaType), nil } // DialHijack returns a hijacked connection with negotiated protocol proto. @@ -41,16 +46,15 @@ func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[s } req = cli.addHeaders(req, meta) - conn, _, err := cli.setupHijackConn(req, proto) + conn, _, err := setupHijackConn(cli.Dialer(), req, proto) return conn, err } -func (cli *Client) setupHijackConn(req *http.Request, proto string) (_ net.Conn, _ string, retErr error) { +func setupHijackConn(dialer func(context.Context) (net.Conn, error), req *http.Request, proto string) (_ net.Conn, _ string, retErr error) { ctx := req.Context() req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", proto) - dialer := cli.Dialer() conn, err := dialer(ctx) if err != nil { return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") @@ -96,13 +100,7 @@ func (cli *Client) setupHijackConn(req *http.Request, proto string) (_ net.Conn, hc.r.Reset(nil) } - var mediaType string - if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") { - // Prior to 1.42, Content-Type is always set to raw-stream and not relevant - mediaType = resp.Header.Get("Content-Type") - } - - return conn, mediaType, nil + return conn, resp.Header.Get("Content-Type"), nil } // hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index d294ddc8b2..6e2a40687b 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" ) // ImageBuild sends a request to the daemon to build images. 
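[Illustrative sketch, not part of the patch: a minimal ImageBuild call. The hunks below only change how options are serialized into the query string (defaults are now omitted), so the caller-facing API is unchanged. buildCtx is a hypothetical tar stream of the build context.]

resp, err := cli.ImageBuild(ctx, buildCtx, types.ImageBuildOptions{
	Tags:       []string{"example/app:latest"},
	Dockerfile: "Dockerfile",
	Remove:     true, // the daemon default; after this change "rm" is no longer sent
})
if err != nil {
	return err
}
defer resp.Body.Close()
// resp.Body streams JSON progress messages from the build.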
@@ -32,22 +33,27 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) headers.Set("Content-Type", "application/x-tar") - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + resp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) if err != nil { return types.ImageBuildResponse{}, err } return types.ImageBuildResponse{ - Body: serverResp.body, - OSType: getDockerOS(serverResp.header.Get("Server")), + Body: resp.Body, + OSType: getDockerOS(resp.Header.Get("Server")), }, nil } func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.ImageBuildOptions) (url.Values, error) { - query := url.Values{ - "t": options.Tags, - "securityopt": options.SecurityOpt, - "extrahosts": options.ExtraHosts, + query := url.Values{} + if len(options.Tags) > 0 { + query["t"] = options.Tags + } + if len(options.SecurityOpt) > 0 { + query["securityopt"] = options.SecurityOpt + } + if len(options.ExtraHosts) > 0 { + query["extrahosts"] = options.ExtraHosts } if options.SuppressOutput { query.Set("q", "1") @@ -58,9 +64,11 @@ func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.I if options.NoCache { query.Set("nocache", "1") } - if options.Remove { - query.Set("rm", "1") - } else { + if !options.Remove { + // only send value when opting out because the daemon's default is + // to remove intermediate containers after a successful build, + // + // TODO(thaJeztah): deprecate "Remove" option, and provide a "NoRemove" or "Keep" option instead. query.Set("rm", "0") } @@ -83,42 +91,70 @@ func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.I query.Set("isolation", string(options.Isolation)) } - query.Set("cpusetcpus", options.CPUSetCPUs) - query.Set("networkmode", options.NetworkMode) - query.Set("cpusetmems", options.CPUSetMems) - query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) - query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) - query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) - query.Set("memory", strconv.FormatInt(options.Memory, 10)) - query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) - query.Set("cgroupparent", options.CgroupParent) - query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) - query.Set("dockerfile", options.Dockerfile) - query.Set("target", options.Target) - - ulimitsJSON, err := json.Marshal(options.Ulimits) - if err != nil { - return query, err + if options.CPUSetCPUs != "" { + query.Set("cpusetcpus", options.CPUSetCPUs) } - query.Set("ulimits", string(ulimitsJSON)) - - buildArgsJSON, err := json.Marshal(options.BuildArgs) - if err != nil { - return query, err + if options.NetworkMode != "" && options.NetworkMode != network.NetworkDefault { + query.Set("networkmode", options.NetworkMode) } - query.Set("buildargs", string(buildArgsJSON)) - - labelsJSON, err := json.Marshal(options.Labels) - if err != nil { - return query, err + if options.CPUSetMems != "" { + query.Set("cpusetmems", options.CPUSetMems) } - query.Set("labels", string(labelsJSON)) - - cacheFromJSON, err := json.Marshal(options.CacheFrom) - if err != nil { - return query, err + if options.CPUShares != 0 { + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + } + if options.CPUQuota != 0 { + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + } + if options.CPUPeriod != 0 { + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 
10)) + } + if options.Memory != 0 { + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + } + if options.MemorySwap != 0 { + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + } + if options.CgroupParent != "" { + query.Set("cgroupparent", options.CgroupParent) + } + if options.ShmSize != 0 { + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + } + if options.Dockerfile != "" { + query.Set("dockerfile", options.Dockerfile) + } + if options.Target != "" { + query.Set("target", options.Target) + } + if len(options.Ulimits) != 0 { + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + } + if len(options.BuildArgs) != 0 { + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + } + if len(options.Labels) != 0 { + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + } + if len(options.CacheFrom) != 0 { + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) } - query.Set("cachefrom", string(cacheFromJSON)) if options.SessionID != "" { query.Set("session", options.SessionID) } @@ -131,7 +167,9 @@ func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.I if options.BuildID != "" { query.Set("buildid", options.BuildID) } - query.Set("version", string(options.Version)) + if options.Version != "" { + query.Set("version", string(options.Version)) + } if options.Outputs != nil { outputsJSON, err := json.Marshal(options.Outputs) diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index 7c7873dca5..0357051e7a 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -30,10 +30,10 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { return cli.post(ctx, "/images/create", query, nil, http.Header{ registry.AuthHeader: {registryAuth}, }) diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go index b5bea10d8f..49381fb839 100644 --- a/vendor/github.com/docker/docker/client/image_history.go +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -3,20 +3,54 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "fmt" "net/url" "github.com/docker/docker/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) +// ImageHistoryWithPlatform sets the platform for the image history operation. +func ImageHistoryWithPlatform(platform ocispec.Platform) ImageHistoryOption { + return imageHistoryOptionFunc(func(opt *imageHistoryOpts) error { + if opt.apiOptions.Platform != nil { + return fmt.Errorf("platform already set to %s", *opt.apiOptions.Platform) + } + opt.apiOptions.Platform = &platform + return nil + }) +} + // ImageHistory returns the changes in an image in history format. 
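[Illustrative sketch, not part of the patch: the new functional option defined above; per the version check in the hunk below it requires API 1.48 or newer. The image reference is hypothetical; ocispec is "github.com/opencontainers/image-spec/specs-go/v1".]

history, err := cli.ImageHistory(ctx, "alpine:latest",
	client.ImageHistoryWithPlatform(ocispec.Platform{OS: "linux", Architecture: "arm64"}),
)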
-func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { - var history []image.HistoryResponseItem - serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) - defer ensureReaderClosed(serverResp) +func (cli *Client) ImageHistory(ctx context.Context, imageID string, historyOpts ...ImageHistoryOption) ([]image.HistoryResponseItem, error) { + query := url.Values{} + + var opts imageHistoryOpts + for _, o := range historyOpts { + if err := o.Apply(&opts); err != nil { + return nil, err + } + } + + if opts.apiOptions.Platform != nil { + if err := cli.NewVersionError(ctx, "1.48", "platform"); err != nil { + return nil, err + } + + p, err := encodePlatform(opts.apiOptions.Platform) + if err != nil { + return nil, err + } + query.Set("platform", p) + } + + resp, err := cli.get(ctx, "/images/"+imageID+"/history", query, nil) + defer ensureReaderClosed(resp) if err != nil { - return history, err + return nil, err } - err = json.NewDecoder(serverResp.body).Decode(&history) + var history []image.HistoryResponseItem + err = json.NewDecoder(resp.Body).Decode(&history) return history, err } diff --git a/vendor/github.com/docker/docker/client/image_history_opts.go b/vendor/github.com/docker/docker/client/image_history_opts.go new file mode 100644 index 0000000000..6d3494dd0b --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_history_opts.go @@ -0,0 +1,19 @@ +package client + +import ( + "github.com/docker/docker/api/types/image" +) + +// ImageHistoryOption is a type representing functional options for the image history operation. +type ImageHistoryOption interface { + Apply(*imageHistoryOpts) error +} +type imageHistoryOptionFunc func(opt *imageHistoryOpts) error + +func (f imageHistoryOptionFunc) Apply(o *imageHistoryOpts) error { + return f(o) +} + +type imageHistoryOpts struct { + apiOptions image.HistoryOptions +} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index 43d55eda8e..5849d85bd7 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -21,10 +21,18 @@ func (cli *Client) ImageImport(ctx context.Context, source image.ImportSource, r } query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) - query.Set("tag", options.Tag) - query.Set("message", options.Message) + if source.SourceName != "" { + query.Set("fromSrc", source.SourceName) + } + if ref != "" { + query.Set("repo", ref) + } + if options.Tag != "" { + query.Set("tag", options.Tag) + } + if options.Message != "" { + query.Set("message", options.Message) + } if options.Platform != "" { query.Set("platform", strings.ToLower(options.Platform)) } @@ -36,5 +44,5 @@ func (cli *Client) ImageImport(ctx context.Context, source image.ImportSource, r if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go index 1de10e5a08..1161195467 100644 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -4,29 +4,62 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" + "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" ) -// ImageInspectWithRaw returns the image information and its raw representation. 
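[Illustrative sketch, not part of the patch: migrating from the deprecated two-value ImageInspectWithRaw to ImageInspect plus the ImageInspectWithRawResponse option introduced in the hunks below. The image reference is hypothetical.]

var raw bytes.Buffer
img, err := cli.ImageInspect(ctx, "alpine:latest",
	client.ImageInspectWithRawResponse(&raw),
)
if err != nil {
	return err
}
// img is the decoded image.InspectResponse; raw.Bytes() holds the raw JSON
// that ImageInspectWithRaw used to return as its second value.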
-func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { +// ImageInspect returns the image information. +func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts ...ImageInspectOption) (image.InspectResponse, error) { if imageID == "" { - return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} + return image.InspectResponse{}, objectNotFoundError{object: "image", id: imageID} } - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ImageInspect{}, nil, err + + var opts imageInspectOpts + for _, opt := range inspectOpts { + if err := opt.Apply(&opts); err != nil { + return image.InspectResponse{}, fmt.Errorf("error applying image inspect option: %w", err) + } } - body, err := io.ReadAll(serverResp.body) + query := url.Values{} + if opts.apiOptions.Manifests { + if err := cli.NewVersionError(ctx, "1.48", "manifests"); err != nil { + return image.InspectResponse{}, err + } + query.Set("manifests", "1") + } + + resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil) + defer ensureReaderClosed(resp) if err != nil { - return types.ImageInspect{}, nil, err + return image.InspectResponse{}, err + } + + buf := opts.raw + if buf == nil { + buf = &bytes.Buffer{} } - var response types.ImageInspect - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err + if _, err := io.Copy(buf, resp.Body); err != nil { + return image.InspectResponse{}, err + } + + var response image.InspectResponse + err = json.Unmarshal(buf.Bytes(), &response) + return response, err +} + +// ImageInspectWithRaw returns the image information and its raw representation. +// +// Deprecated: Use [Client.ImageInspect] instead. Raw response can be obtained using the [ImageInspectWithRawResponse] option. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (image.InspectResponse, []byte, error) { + var buf bytes.Buffer + resp, err := cli.ImageInspect(ctx, imageID, ImageInspectWithRawResponse(&buf)) + if err != nil { + return image.InspectResponse{}, nil, err + } + return resp, buf.Bytes(), err } diff --git a/vendor/github.com/docker/docker/client/image_inspect_opts.go b/vendor/github.com/docker/docker/client/image_inspect_opts.go new file mode 100644 index 0000000000..2607f36789 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect_opts.go @@ -0,0 +1,50 @@ +package client + +import ( + "bytes" + + "github.com/docker/docker/api/types/image" +) + +// ImageInspectOption is a type representing functional options for the image inspect operation. +type ImageInspectOption interface { + Apply(*imageInspectOpts) error +} +type imageInspectOptionFunc func(opt *imageInspectOpts) error + +func (f imageInspectOptionFunc) Apply(o *imageInspectOpts) error { + return f(o) +} + +// ImageInspectWithRawResponse instructs the client to additionally store the +// raw inspect response in the provided buffer. +func ImageInspectWithRawResponse(raw *bytes.Buffer) ImageInspectOption { + return imageInspectOptionFunc(func(opts *imageInspectOpts) error { + opts.raw = raw + return nil + }) +} + +// ImageInspectWithManifests sets manifests API option for the image inspect operation. +// This option is only available for API version 1.48 and up. 
+// With this option set, the image inspect operation response will have the +// [image.InspectResponse.Manifests] field populated if the server is multi-platform capable. +func ImageInspectWithManifests(manifests bool) ImageInspectOption { + return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { + clientOpts.apiOptions.Manifests = manifests + return nil + }) +} + +// ImageInspectWithAPIOpts sets the API options for the image inspect operation. +func ImageInspectWithAPIOpts(opts image.InspectOptions) ImageInspectOption { + return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { + clientOpts.apiOptions = opts + return nil + }) +} + +type imageInspectOpts struct { + raw *bytes.Buffer + apiOptions image.InspectOptions +} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index bef679431d..e1911eb7e6 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -56,12 +56,12 @@ func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([] query.Set("manifests", "1") } - serverResp, err := cli.get(ctx, "/images/json", query, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/images/json", query, nil) + defer ensureReaderClosed(resp) if err != nil { return images, err } - err = json.NewDecoder(serverResp.body).Decode(&images) + err = json.NewDecoder(resp.Body).Decode(&images) return images, err } diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go index c68f0013e6..d83877d4b3 100644 --- a/vendor/github.com/docker/docker/client/image_load.go +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -12,20 +12,43 @@ import ( // ImageLoad loads an image in the docker host from the client host. // It's up to the caller to close the io.ReadCloser in the // ImageLoadResponse returned by this function. -func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error) { - v := url.Values{} - v.Set("quiet", "0") - if quiet { - v.Set("quiet", "1") +// +// Platform is an optional parameter that specifies the platform to load from +// the provided multi-platform image. This only has an effect if the input image +// is a multi-platform image. +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, loadOpts ...ImageLoadOption) (image.LoadResponse, error) { + var opts imageLoadOpts + for _, opt := range loadOpts { + if err := opt.Apply(&opts); err != nil { + return image.LoadResponse{}, err + } } - resp, err := cli.postRaw(ctx, "/images/load", v, input, http.Header{ + + query := url.Values{} + query.Set("quiet", "0") + if opts.apiOptions.Quiet { + query.Set("quiet", "1") + } + if len(opts.apiOptions.Platforms) > 0 { + if err := cli.NewVersionError(ctx, "1.48", "platform"); err != nil { + return image.LoadResponse{}, err + } + + p, err := encodePlatforms(opts.apiOptions.Platforms...)
+ if err != nil { + return image.LoadResponse{}, err + } + query["platform"] = p + } + + resp, err := cli.postRaw(ctx, "/images/load", query, input, http.Header{ "Content-Type": {"application/x-tar"}, }) if err != nil { return image.LoadResponse{}, err } return image.LoadResponse{ - Body: resp.body, - JSON: resp.header.Get("Content-Type") == "application/json", + Body: resp.Body, + JSON: resp.Header.Get("Content-Type") == "application/json", }, nil } diff --git a/vendor/github.com/docker/docker/client/image_load_opts.go b/vendor/github.com/docker/docker/client/image_load_opts.go new file mode 100644 index 0000000000..ebcedd41ff --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load_opts.go @@ -0,0 +1,41 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageLoadOption is a type representing functional options for the image load operation. +type ImageLoadOption interface { + Apply(*imageLoadOpts) error +} +type imageLoadOptionFunc func(opt *imageLoadOpts) error + +func (f imageLoadOptionFunc) Apply(o *imageLoadOpts) error { + return f(o) +} + +type imageLoadOpts struct { + apiOptions image.LoadOptions +} + +// ImageLoadWithQuiet sets the quiet option for the image load operation. +func ImageLoadWithQuiet(quiet bool) ImageLoadOption { + return imageLoadOptionFunc(func(opt *imageLoadOpts) error { + opt.apiOptions.Quiet = quiet + return nil + }) +} + +// ImageLoadWithPlatforms sets the platforms to be loaded from the image. +func ImageLoadWithPlatforms(platforms ...ocispec.Platform) ImageLoadOption { + return imageLoadOptionFunc(func(opt *imageLoadOpts) error { + if opt.apiOptions.Platforms != nil { + return fmt.Errorf("platforms already set to %v", opt.apiOptions.Platforms) + } + opt.apiOptions.Platforms = platforms + return nil + }) +} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go index 5ee987e248..7c354d7b12 100644 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -11,25 +11,24 @@ import ( // ImagesPrune requests the daemon to delete unused data func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (image.PruneReport, error) { - var report image.PruneReport - if err := cli.NewVersionError(ctx, "1.25", "image prune"); err != nil { - return report, err + return image.PruneReport{}, err } query, err := getFiltersQuery(pruneFilters) if err != nil { - return report, err + return image.PruneReport{}, err } - serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/images/prune", query, nil, nil) + defer ensureReaderClosed(resp) if err != nil { - return report, err + return image.PruneReport{}, err } - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) + var report image.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return image.PruneReport{}, fmt.Errorf("Error retrieving disk usage: %v", err) } return report, nil diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index 1634c4c800..4286942b38 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -45,7 +45,7 @@ 
func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } // getAPITagFromNamedRef returns a tag from the specified reference. diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index 16f9c4651d..b340bc4fba 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -63,10 +63,10 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } -func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*http.Response, error) { return cli.post(ctx, "/images/"+imageID+"/push", query, nil, http.Header{ registry.AuthHeader: {registryAuth}, }) } diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index 652d1bfa3e..b0c87ca09c 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -19,13 +19,13 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options imag query.Set("noprune", "1") } - var dels []image.DeleteResponse resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) defer ensureReaderClosed(resp) if err != nil { - return dels, err + return nil, err } - err = json.NewDecoder(resp.body).Decode(&dels) + var dels []image.DeleteResponse + err = json.NewDecoder(resp.Body).Decode(&dels) return dels, err } diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go index d1314e4b22..0aa7177d20 100644 --- a/vendor/github.com/docker/docker/client/image_save.go +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -7,15 +7,35 @@ import ( ) // ImageSave retrieves one or more images from the docker host as an io.ReadCloser. -// It's up to the caller to store the images and close the stream. -func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { +// +// Platforms is an optional parameter that specifies the platforms to save from the image. +// This only has an effect if the input image is a multi-platform image. +func (cli *Client) ImageSave(ctx context.Context, imageIDs []string, saveOpts ...ImageSaveOption) (io.ReadCloser, error) { + var opts imageSaveOpts + for _, opt := range saveOpts { + if err := opt.Apply(&opts); err != nil { + return nil, err + } + } + query := url.Values{ "names": imageIDs, } + if len(opts.apiOptions.Platforms) > 0 { + if err := cli.NewVersionError(ctx, "1.48", "platform"); err != nil { + return nil, err + } + p, err := encodePlatforms(opts.apiOptions.Platforms...)
+ if err != nil { + return nil, err + } + query["platform"] = p + } + resp, err := cli.get(ctx, "/images/get", query, nil) if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/image_save_opts.go b/vendor/github.com/docker/docker/client/image_save_opts.go new file mode 100644 index 0000000000..acd8f282b3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save_opts.go @@ -0,0 +1,33 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ImageSaveOption interface { + Apply(*imageSaveOpts) error +} + +type imageSaveOptionFunc func(opt *imageSaveOpts) error + +func (f imageSaveOptionFunc) Apply(o *imageSaveOpts) error { + return f(o) +} + +// ImageSaveWithPlatforms sets the platforms to be saved from the image. +func ImageSaveWithPlatforms(platforms ...ocispec.Platform) ImageSaveOption { + return imageSaveOptionFunc(func(opt *imageSaveOpts) error { + if opt.apiOptions.Platforms != nil { + return fmt.Errorf("platforms already set to %v", opt.apiOptions.Platforms) + } + opt.apiOptions.Platforms = platforms + return nil + }) +} + +type imageSaveOpts struct { + apiOptions image.SaveOptions +} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index 0a07457574..0a7b5ec226 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -43,11 +43,11 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options registr return results, err } - err = json.NewDecoder(resp.body).Decode(&results) + err = json.NewDecoder(resp.Body).Decode(&results) return results, err } -func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { return cli.get(ctx, "/images/search", query, http.Header{ registry.AuthHeader: {registryAuth}, }) diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go index cc3fcc4670..6396f4b60f 100644 --- a/vendor/github.com/docker/docker/client/info.go +++ b/vendor/github.com/docker/docker/client/info.go @@ -12,13 +12,13 @@ import ( // Info returns information about the docker server. func (cli *Client) Info(ctx context.Context) (system.Info, error) { var info system.Info - serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(resp) if err != nil { return info, err } - if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { return info, fmt.Errorf("Error reading remote info: %v", err) } diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go deleted file mode 100644 index 5502cd7426..0000000000 --- a/vendor/github.com/docker/docker/client/interface_stable.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// APIClient is an interface that clients that talk with a docker server must implement. 
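[Illustrative sketch, not part of the patch: ImageSave and ImageLoad now take the functional options defined in image_save_opts.go and image_load_opts.go above; platform selection requires API 1.48+. The image name and tarStream variable are hypothetical.]

// Save only the linux/amd64 variant of a multi-platform image.
rc, err := cli.ImageSave(ctx, []string{"example/app:latest"},
	client.ImageSaveWithPlatforms(ocispec.Platform{OS: "linux", Architecture: "amd64"}),
)
if err != nil {
	return err
}
defer rc.Close()

// Load an image quietly, picking one platform out of the archive;
// the caller must close resp.Body.
resp, err := cli.ImageLoad(ctx, tarStream,
	client.ImageLoadWithQuiet(true),
	client.ImageLoadWithPlatforms(ocispec.Platform{OS: "linux", Architecture: "amd64"}),
)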
-type APIClient interface { - CommonAPIClient - apiClientExperimental -} - -// Ensure that Client always implements APIClient. -var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go index 19e985e0b9..d3572c1bf4 100644 --- a/vendor/github.com/docker/docker/client/login.go +++ b/vendor/github.com/docker/docker/client/login.go @@ -19,6 +19,6 @@ func (cli *Client) RegistryLogin(ctx context.Context, auth registry.AuthConfig) } var response registry.AuthenticateOKBody - err = json.NewDecoder(resp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go index 8daf890635..fa7cc34faa 100644 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -8,6 +8,16 @@ import ( // NetworkConnect connects a container to an existent network in the docker host. func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + networkID, err := trimID("network", networkID) + if err != nil { + return err + } + + containerID, err = trimID("container", containerID) + if err != nil { + return err + } + nc := network.ConnectOptions{ Container: containerID, EndpointConfig: config, diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go index 850e31cc97..eef9514456 100644 --- a/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -10,15 +10,13 @@ import ( // NetworkCreate creates a new network in the docker host. func (cli *Client) NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error) { - var response network.CreateResponse - // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. if err := cli.checkVersion(ctx); err != nil { - return response, err + return network.CreateResponse{}, err } networkCreateRequest := network.CreateRequest{ @@ -30,12 +28,13 @@ func (cli *Client) NetworkCreate(ctx context.Context, name string, options netwo networkCreateRequest.CheckDuplicate = &enabled //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44. 
} - serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + defer ensureReaderClosed(resp) if err != nil { - return response, err + return network.CreateResponse{}, err } - err = json.NewDecoder(serverResp.body).Decode(&response) + var response network.CreateResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go index aaf428d853..d8051df2fa 100644 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -8,6 +8,16 @@ import ( // NetworkDisconnect disconnects a container from an existent network in the docker host. func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + networkID, err := trimID("network", networkID) + if err != nil { + return err + } + + containerID, err = trimID("container", containerID) + if err != nil { + return err + } + nd := network.DisconnectOptions{ Container: containerID, Force: force, diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index afc47de6fa..1387c080a5 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -18,8 +18,9 @@ func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options // NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. 
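[Illustrative sketch, not part of the patch: inspecting a network with the verbose option; the network name is hypothetical.]

nw, raw, err := cli.NetworkInspectWithRaw(ctx, "my-network", network.InspectOptions{
	Verbose: true, // request extra service/task detail for swarm-scoped networks
})
// nw is the decoded network.Inspect; raw is the undecoded JSON body.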
func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options network.InspectOptions) (network.Inspect, []byte, error) { - if networkID == "" { - return network.Inspect{}, nil, objectNotFoundError{object: "network", id: networkID} + networkID, err := trimID("network", networkID) + if err != nil { + return network.Inspect{}, nil, err } query := url.Values{} if options.Verbose { @@ -35,7 +36,7 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, return network.Inspect{}, nil, err } - raw, err := io.ReadAll(resp.body) + raw, err := io.ReadAll(resp.Body) if err != nil { return network.Inspect{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go index 72957d47fe..e1b4fca731 100644 --- a/vendor/github.com/docker/docker/client/network_list.go +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -27,6 +27,6 @@ func (cli *Client) NetworkList(ctx context.Context, options network.ListOptions) if err != nil { return networkResources, err } - err = json.NewDecoder(resp.body).Decode(&networkResources) + err = json.NewDecoder(resp.Body).Decode(&networkResources) return networkResources, err } diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go index 708cc61a4b..90d3679f38 100644 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -11,25 +11,24 @@ import ( // NetworksPrune requests the daemon to delete unused networks func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (network.PruneReport, error) { - var report network.PruneReport - if err := cli.NewVersionError(ctx, "1.25", "network prune"); err != nil { - return report, err + return network.PruneReport{}, err } query, err := getFiltersQuery(pruneFilters) if err != nil { - return report, err + return network.PruneReport{}, err } - serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + defer ensureReaderClosed(resp) if err != nil { - return report, err + return network.PruneReport{}, err } - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving network prune report: %v", err) + var report network.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return network.PruneReport{}, fmt.Errorf("Error retrieving network prune report: %v", err) } return report, nil diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go index 9d6c6cef07..89fdaaf3a8 100644 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -4,6 +4,10 @@ import "context" // NetworkRemove removes an existent network from the docker host. 
func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + networkID, err := trimID("network", networkID) + if err != nil { + return err + } resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) defer ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go index 95ab9b1be0..5d3343dc40 100644 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -11,16 +11,17 @@ import ( // NodeInspectWithRaw returns the node information. func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { - if nodeID == "" { - return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} + nodeID, err := trimID("node", nodeID) + if err != nil { + return swarm.Node{}, nil, err } - serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + defer ensureReaderClosed(resp) if err != nil { return swarm.Node{}, nil, err } - body, err := io.ReadAll(serverResp.body) + body, err := io.ReadAll(resp.Body) if err != nil { return swarm.Node{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go index 1a9e6bfb1b..2534f4aee1 100644 --- a/vendor/github.com/docker/docker/client/node_list.go +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -30,6 +30,6 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) } var nodes []swarm.Node - err = json.NewDecoder(resp.body).Decode(&nodes) + err = json.NewDecoder(resp.Body).Decode(&nodes) return nodes, err } diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go index e44436debc..81f8fed6b5 100644 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -9,6 +9,11 @@ import ( // NodeRemove removes a Node. func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + nodeID, err := trimID("node", nodeID) + if err != nil { + return err + } + query := url.Values{} if options.Force { query.Set("force", "1") diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go index 0d0fc3b788..10e2186615 100644 --- a/vendor/github.com/docker/docker/client/node_update.go +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -9,6 +9,11 @@ import ( // NodeUpdate updates a Node. 
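[Illustrative sketch, not part of the patch: the usual read-modify-write pattern for NodeUpdate, passing the version from a prior inspect for optimistic concurrency. The node ID is hypothetical.]

node, _, err := cli.NodeInspectWithRaw(ctx, "node-id")
if err != nil {
	return err
}
node.Spec.Availability = swarm.NodeAvailabilityDrain
err = cli.NodeUpdate(ctx, node.ID, node.Version, node.Spec)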
func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + nodeID, err := trimID("node", nodeID) + if err != nil { + return err + } + query := url.Values{} query.Set("version", version.String()) resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go index ddb0ca3991..6f68fc2b89 100644 --- a/vendor/github.com/docker/docker/client/options.go +++ b/vendor/github.com/docker/docker/client/options.go @@ -6,11 +6,13 @@ import ( "net/http" "os" "path/filepath" + "strings" "time" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel/trace" ) @@ -194,8 +196,8 @@ func WithTLSClientConfigFromEnv() Opt { // (see [WithAPIVersionNegotiation]). func WithVersion(version string) Opt { return func(c *Client) error { - if version != "" { - c.version = version + if v := strings.TrimPrefix(version, "v"); v != "" { + c.version = v c.manualOverride = true } return nil @@ -226,8 +228,13 @@ func WithAPIVersionNegotiation() Opt { // WithTraceProvider sets the trace provider for the client. // If this is not set then the global trace provider will be used. func WithTraceProvider(provider trace.TracerProvider) Opt { + return WithTraceOptions(otelhttp.WithTracerProvider(provider)) +} + +// WithTraceOptions sets tracing span options for the client. +func WithTraceOptions(opts ...otelhttp.Option) Opt { return func(c *Client) error { - c.tp = provider + c.traceOpts = append(c.traceOpts, opts...) return nil } } diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index bf3e9b1cd6..c7645e56d6 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -8,7 +8,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/errdefs" ) // Ping pings the server and returns the value of the "Docker-Experimental", @@ -28,49 +27,54 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { if err != nil { return ping, err } - serverResp, err := cli.doRequest(req) - if err == nil { - defer ensureReaderClosed(serverResp) - switch serverResp.statusCode { + resp, err := cli.doRequest(req) + if err != nil { + if IsErrConnectionFailed(err) { + return ping, err + } + // We managed to connect, but got some error; continue and try GET request. + } else { + defer ensureReaderClosed(resp) + switch resp.StatusCode { case http.StatusOK, http.StatusInternalServerError: // Server handled the request, so parse the response - return parsePingResponse(cli, serverResp) + return parsePingResponse(cli, resp) } - } else if IsErrConnectionFailed(err) { - return ping, err } // HEAD failed; fallback to GET. 
req.Method = http.MethodGet - serverResp, err = cli.doRequest(req) - defer ensureReaderClosed(serverResp) + resp, err = cli.doRequest(req) + defer ensureReaderClosed(resp) if err != nil { return ping, err } - return parsePingResponse(cli, serverResp) + return parsePingResponse(cli, resp) } -func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { +func parsePingResponse(cli *Client, resp *http.Response) (types.Ping, error) { + if resp == nil { + return types.Ping{}, nil + } + var ping types.Ping - if resp.header == nil { - err := cli.checkResponseErr(resp) - return ping, errdefs.FromStatusCode(err, resp.statusCode) + if resp.Header == nil { + return ping, cli.checkResponseErr(resp) } - ping.APIVersion = resp.header.Get("API-Version") - ping.OSType = resp.header.Get("OSType") - if resp.header.Get("Docker-Experimental") == "true" { + ping.APIVersion = resp.Header.Get("Api-Version") + ping.OSType = resp.Header.Get("Ostype") + if resp.Header.Get("Docker-Experimental") == "true" { ping.Experimental = true } - if bv := resp.header.Get("Builder-Version"); bv != "" { + if bv := resp.Header.Get("Builder-Version"); bv != "" { ping.BuilderVersion = types.BuilderVersion(bv) } - if si := resp.header.Get("Swarm"); si != "" { + if si := resp.Header.Get("Swarm"); si != "" { state, role, _ := strings.Cut(si, "/") ping.SwarmStatus = &swarm.Status{ NodeState: swarm.LocalNodeState(state), ControlAvailable: role == "manager", } } - err := cli.checkResponseErr(resp) - return ping, errdefs.FromStatusCode(err, resp.statusCode) + return ping, cli.checkResponseErr(resp) } diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go index 01f6574f95..9fabe77bf6 100644 --- a/vendor/github.com/docker/docker/client/plugin_disable.go +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -9,6 +9,10 @@ import ( // PluginDisable disables a plugin func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + name, err := trimID("plugin", name) + if err != nil { + return err + } query := url.Values{} if options.Force { query.Set("force", "1") diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go index 736da48bd1..492d0bcff5 100644 --- a/vendor/github.com/docker/docker/client/plugin_enable.go +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -10,6 +10,10 @@ import ( // PluginEnable enables a plugin func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + name, err := trimID("plugin", name) + if err != nil { + return err + } query := url.Values{} query.Set("timeout", strconv.Itoa(options.Timeout)) diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go index f09e460660..8f107a760e 100644 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -11,8 +11,9 @@ import ( // PluginInspectWithRaw inspects an existing plugin func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { - if name == "" { - return nil, nil, objectNotFoundError{object: "plugin", id: name} + name, err := trimID("plugin", name) + if err != nil { + return nil, nil, err } resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) defer ensureReaderClosed(resp) @@ -20,7 +21,7 @@ 
func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*type return nil, nil, err } - body, err := io.ReadAll(resp.body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go index a0d8c3500c..b04dcf9a10 100644 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -35,13 +35,13 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types return nil, err } - name = resp.header.Get("Docker-Plugin-Name") + name = resp.Header.Get("Docker-Plugin-Name") pr, pw := io.Pipe() go func() { // todo: the client should probably be designed more around the actual api - _, err := io.Copy(pw, resp.body) + _, err := io.Copy(pw, resp.Body) if err != nil { - pw.CloseWithError(err) + _ = pw.CloseWithError(err) return } defer func() { @@ -52,29 +52,29 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types }() if len(options.Args) > 0 { if err := cli.PluginSet(ctx, name, options.Args); err != nil { - pw.CloseWithError(err) + _ = pw.CloseWithError(err) return } } if options.Disabled { - pw.Close() + _ = pw.Close() return } enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) - pw.CloseWithError(enableErr) + _ = pw.CloseWithError(enableErr) }() return pr, nil } -func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { return cli.get(ctx, "/plugins/privileges", query, http.Header{ registry.AuthHeader: {registryAuth}, }) } -func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (*http.Response, error) { return cli.post(ctx, "/plugins/pull", query, privileges, http.Header{ registry.AuthHeader: {registryAuth}, }) @@ -98,7 +98,7 @@ func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, } var privileges types.PluginPrivileges - if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + if err := json.NewDecoder(resp.Body).Decode(&privileges); err != nil { ensureReaderClosed(resp) return nil, err } diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index 2091a054d6..03bcf7621d 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -28,6 +28,6 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P return plugins, err } - err = json.NewDecoder(resp.body).Decode(&plugins) + err = json.NewDecoder(resp.Body).Decode(&plugins) return plugins, err } diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go index 8f68a86eee..da15e449d8 100644 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -10,11 +10,15 @@ import ( // PluginPush pushes a plugin to a registry func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) 
(io.ReadCloser, error) { + name, err := trimID("plugin", name) + if err != nil { + return nil, err + } resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, http.Header{ registry.AuthHeader: {registryAuth}, }) if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go index 4cd66958c3..6ee107e3cc 100644 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -9,6 +9,11 @@ import ( // PluginRemove removes a plugin func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + name, err := trimID("plugin", name) + if err != nil { + return err + } + query := url.Values{} if options.Force { query.Set("force", "1") diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go index dcf5752ca2..e2a79838d5 100644 --- a/vendor/github.com/docker/docker/client/plugin_set.go +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -6,6 +6,11 @@ import ( // PluginSet modifies settings for an existing plugin func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + name, err := trimID("plugin", name) + if err != nil { + return err + } + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go index 5cade450f4..4abb29cf01 100644 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -13,7 +13,12 @@ import ( ) // PluginUpgrade upgrades a plugin -func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) { + name, err := trimID("plugin", name) + if err != nil { + return nil, err + } + if err := cli.NewVersionError(ctx, "1.26", "plugin upgrade"); err != nil { return nil, err } @@ -32,10 +37,10 @@ func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } -func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (*http.Response, error) { return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, http.Header{ registry.AuthHeader: {registryAuth}, }) diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 6eea9b4e4f..2b913aab64 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -19,47 +19,39 @@ import ( "github.com/pkg/errors" ) -// serverResponse is a wrapper for http API responses. -type serverResponse struct { - body io.ReadCloser - header http.Header - statusCode int - reqURL *url.URL -} - // head sends an http request to the docker API using the method HEAD. 
-func (cli *Client) head(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) { return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) } // get sends an http request to the docker API using the method GET with a specific Go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) { return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) } // post sends an http request to the docker API using the method POST with a specific Go context. -func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (*http.Response, error) { body, headers, err := encodeBody(obj, headers) if err != nil { - return serverResponse{}, err + return nil, err } return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (*http.Response, error) { body, headers, err := encodeBody(obj, headers) if err != nil { - return serverResponse{}, err + return nil, err } return cli.putRaw(ctx, path, query, body, headers) } // putRaw sends an http request to the docker API using the method PUT. -func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { // PUT requests are expected to always have a body (apparently) // so explicitly pass an empty body to sendRequest to signal that // it should set the Content-Type header if not already present. @@ -70,7 +62,7 @@ func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, bo } // delete sends an http request to the docker API using the method DELETE. 
-func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) { return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) } @@ -116,59 +108,60 @@ func (cli *Client) buildRequest(ctx context.Context, method, path string, body i return req, nil } -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { req, err := cli.buildRequest(ctx, method, cli.getAPIPath(ctx, path, query), body, headers) if err != nil { - return serverResponse{}, err + return nil, err } resp, err := cli.doRequest(req) switch { case errors.Is(err, context.Canceled): - return serverResponse{}, errdefs.Cancelled(err) + return nil, errdefs.Cancelled(err) case errors.Is(err, context.DeadlineExceeded): - return serverResponse{}, errdefs.Deadline(err) + return nil, errdefs.Deadline(err) case err == nil: - err = cli.checkResponseErr(resp) + return resp, cli.checkResponseErr(resp) + default: + return resp, err } - return resp, errdefs.FromStatusCode(err, resp.statusCode) } -// FIXME(thaJeztah): Should this actually return a serverResp when a connection error occurred? -func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { - serverResp := serverResponse{statusCode: -1, reqURL: req.URL} - +func (cli *Client) doRequest(req *http.Request) (*http.Response, error) { resp, err := cli.client.Do(req) if err != nil { if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { - return serverResp, errConnectionFailed{fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)} + return nil, errConnectionFailed{fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)} } if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return serverResp, errConnectionFailed{errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings")} + return nil, errConnectionFailed{errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings")} } // Don't decorate context sentinel errors; users may be comparing to // them directly. if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return serverResp, err + return nil, err } - if uErr, ok := err.(*url.Error); ok { - if nErr, ok := uErr.Err.(*net.OpError); ok { + var uErr *url.Error + if errors.As(err, &uErr) { + var nErr *net.OpError + if errors.As(uErr.Err, &nErr) { if os.IsPermission(nErr.Err) { - return serverResp, errConnectionFailed{errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)} + return nil, errConnectionFailed{errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)} } } } - if nErr, ok := err.(net.Error); ok { + var nErr net.Error + if errors.As(err, &nErr) { // FIXME(thaJeztah): any net.Error should be considered a connection error (but we should include the original error)? 
if nErr.Timeout() { - return serverResp, ErrorConnectionFailed(cli.host) + return nil, connectionFailed(cli.host) } if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") { - return serverResp, ErrorConnectionFailed(cli.host) + return nil, connectionFailed(cli.host) } } @@ -192,28 +185,37 @@ func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { } } - return serverResp, errConnectionFailed{errors.Wrap(err, "error during connect")} + return nil, errConnectionFailed{errors.Wrap(err, "error during connect")} } - if resp != nil { - serverResp.statusCode = resp.StatusCode - serverResp.body = resp.Body - serverResp.header = resp.Header - } - return serverResp, nil + return resp, nil } -func (cli *Client) checkResponseErr(serverResp serverResponse) error { - if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { +func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) { + if serverResp == nil { return nil } + if serverResp.StatusCode >= 200 && serverResp.StatusCode < 400 { + return nil + } + defer func() { + retErr = errdefs.FromStatusCode(retErr, serverResp.StatusCode) + }() var body []byte var err error - if serverResp.body != nil { + var reqURL string + if serverResp.Request != nil { + reqURL = serverResp.Request.URL.String() + } + statusMsg := serverResp.Status + if statusMsg == "" { + statusMsg = http.StatusText(serverResp.StatusCode) + } + if serverResp.Body != nil { bodyMax := 1 * 1024 * 1024 // 1 MiB bodyR := &io.LimitedReader{ - R: serverResp.body, + R: serverResp.Body, N: int64(bodyMax), } body, err = io.ReadAll(bodyR) @@ -221,21 +223,54 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error { return err } if bodyR.N == 0 { - return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL) + if reqURL != "" { + return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", statusMsg, bodyMax, reqURL) + } + return fmt.Errorf("request returned %s with a message (> %d bytes); check if the server supports the requested API version", statusMsg, bodyMax) } } if len(body) == 0 { - return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) + if reqURL != "" { + return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", statusMsg, reqURL) + } + return fmt.Errorf("request returned %s; check if the server supports the requested API version", statusMsg) } var daemonErr error - if serverResp.header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) { + if serverResp.Header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) { var errorResponse types.ErrorResponse if err := json.Unmarshal(body, &errorResponse); err != nil { return errors.Wrap(err, "Error reading JSON") } - daemonErr = errors.New(strings.TrimSpace(errorResponse.Message)) + if errorResponse.Message == "" { + // Error-message is empty, which means that we successfully parsed the + // JSON-response (no error produced), but it didn't contain an error + // message. 
This could either be because the response was empty, or + // the response was valid JSON, but not with the expected schema + // ([types.ErrorResponse]). + // + // We cannot use "strict" JSON handling (json.NewDecoder with DisallowUnknownFields) + // due to the API using an open schema (we must anticipate fields + // being added to [types.ErrorResponse] in the future, and not + // reject those responses). + // + // For these cases, we construct an error with the status-code + // returned, but we could consider returning (a truncated version + // of) the actual response as-is. + // + // TODO(thaJeztah): consider adding a log.Debug to allow clients to debug the actual response when enabling debug logging. + daemonErr = fmt.Errorf(`API returned a %d (%s) but provided no error-message`, + serverResp.StatusCode, + http.StatusText(serverResp.StatusCode), + ) + } else { + daemonErr = errors.New(strings.TrimSpace(errorResponse.Message)) + } } else { + // Fall back to returning the response as-is for API versions < 1.24 + // that didn't support JSON error responses, and for situations + // where a plain text error is returned. This branch may also catch + // situations where a proxy is involved, returning an HTML response. daemonErr = errors.New(strings.TrimSpace(string(body))) } return errors.Wrap(daemonErr, "Error response from daemon") @@ -275,10 +310,16 @@ func encodeData(data interface{}) (*bytes.Buffer, error) { return params, nil } -func ensureReaderClosed(response serverResponse) { - if response.body != nil { +func ensureReaderClosed(response *http.Response) { + if response != nil && response.Body != nil { // Drain up to 512 bytes and close the body to let the Transport reuse the connection - _, _ = io.CopyN(io.Discard, response.body, 512) - _ = response.body.Close() + // see https://github.com/google/go-github/pull/317/files#r57536827 + // + // TODO(thaJeztah): see if this optimization is still needed, or already implemented in stdlib, + // and check if context-cancellation should handle this as well. If still needed, consider + // wrapping response.Body, or returning a "closer()" from [Client.sendRequest] and related + // methods. + _, _ = io.CopyN(io.Discard, response.Body, 512) + _ = response.Body.Close() + } }
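A note between files (not part of the vendored diff): with checkResponseErr now deferring to errdefs.FromStatusCode, daemon errors come back pre-classified, so callers can branch on the error class instead of matching message strings. A minimal sketch, assuming a reachable daemon; the image reference is hypothetical:

    package main

    import (
        "context"
        "log"

        "github.com/docker/docker/client"
        "github.com/docker/docker/errdefs"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            log.Fatal(err)
        }
        defer cli.Close()
        // Inspecting a missing object surfaces a typed not-found error.
        _, _, err = cli.ImageInspectWithRaw(context.Background(), "example.invalid/no-such-image:latest")
        log.Println("not found?", errdefs.IsNotFound(err))
    }

diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index 7b7f1ba740..bbd1191877 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -10,16 +10,16 @@ import ( // SecretCreate creates a new secret.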
func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { - var response types.SecretCreateResponse if err := cli.NewVersionError(ctx, "1.25", "secret create"); err != nil { - return response, err + return types.SecretCreateResponse{}, err } resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) defer ensureReaderClosed(resp) if err != nil { - return response, err + return types.SecretCreateResponse{}, err } - err = json.NewDecoder(resp.body).Decode(&response) + var response types.SecretCreateResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index a9cb59889b..fdabc197f0 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -11,11 +11,12 @@ import ( // SecretInspectWithRaw returns the secret information with raw data func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { - if err := cli.NewVersionError(ctx, "1.25", "secret inspect"); err != nil { + id, err := trimID("secret", id) + if err != nil { return swarm.Secret{}, nil, err } - if id == "" { - return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} + if err := cli.NewVersionError(ctx, "1.25", "secret inspect"); err != nil { + return swarm.Secret{}, nil, err } resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) defer ensureReaderClosed(resp) @@ -23,7 +24,7 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S return swarm.Secret{}, nil, err } - body, err := io.ReadAll(resp.body) + body, err := io.ReadAll(resp.Body) if err != nil { return swarm.Secret{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go index 4d21639ef6..e3b7dbdb9e 100644 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -33,6 +33,6 @@ func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptio } var secrets []swarm.Secret - err = json.NewDecoder(resp.body).Decode(&secrets) + err = json.NewDecoder(resp.Body).Decode(&secrets) return secrets, err } diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index 079ed67394..7ea2acbf52 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -4,6 +4,10 @@ import "context" // SecretRemove removes a secret. func (cli *Client) SecretRemove(ctx context.Context, id string) error { + id, err := trimID("secret", id) + if err != nil { + return err + } if err := cli.NewVersionError(ctx, "1.25", "secret remove"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go index 9dfe67198b..60d21a6f2c 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -9,6 +9,10 @@ import ( // SecretUpdate attempts to update a secret. 
func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + id, err := trimID("secret", id) + if err != nil { + return err + } if err := cli.NewVersionError(ctx, "1.25", "secret update"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index b72cb420d4..54c03b1389 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -37,6 +37,11 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, if err := validateServiceSpec(service); err != nil { return response, err } + if versions.LessThan(cli.version, "1.30") { + if err := validateAPIVersion(service, cli.version); err != nil { + return response, err + } + } // ensure that the image is tagged var resolveWarning string @@ -73,7 +78,7 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, return response, err } - err = json.NewDecoder(resp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) if resolveWarning != "" { response.Warnings = append(response.Warnings, resolveWarning) } @@ -191,3 +196,18 @@ func validateServiceSpec(s swarm.ServiceSpec) error { } return nil } + +func validateAPIVersion(c swarm.ServiceSpec, apiVersion string) error { + for _, m := range c.TaskTemplate.ContainerSpec.Mounts { + if m.BindOptions != nil { + if m.BindOptions.NonRecursive && versions.LessThan(apiVersion, "1.40") { + return errors.Errorf("bind-recursive=disabled requires API v1.40 or later") + } + // ReadOnlyNonRecursive can be safely ignored when API < 1.44 + if m.BindOptions.ReadOnlyForceRecursive && versions.LessThan(apiVersion, "1.44") { + return errors.Errorf("bind-recursive=readonly requires API v1.44 or later") + } + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go index cee020c98b..77b4402d37 100644 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -14,18 +14,20 @@ import ( // ServiceInspectWithRaw returns the service information and the raw data. 
func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { - if serviceID == "" { - return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} + serviceID, err := trimID("service", serviceID) + if err != nil { + return swarm.Service{}, nil, err } + query := url.Values{} query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) - serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + defer ensureReaderClosed(resp) if err != nil { return swarm.Service{}, nil, err } - body, err := io.ReadAll(serverResp.body) + body, err := io.ReadAll(resp.Body) if err != nil { return swarm.Service{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go index f97ec75a5c..f589a8423a 100644 --- a/vendor/github.com/docker/docker/client/service_list.go +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -34,6 +34,6 @@ func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOpt } var services []swarm.Service - err = json.NewDecoder(resp.body).Decode(&services) + err = json.NewDecoder(resp.Body).Decode(&services) return services, err } diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go index e9e30a2ab4..6e0cbee49f 100644 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -14,6 +14,11 @@ import ( // ServiceLogs returns the logs generated by a service in an io.ReadCloser. // It's up to the caller to close the stream. func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) { + serviceID, err := trimID("service", serviceID) + if err != nil { + return nil, err + } + query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") @@ -48,5 +53,5 @@ func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options co if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go index 2c46326ebc..93c949e44a 100644 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -4,6 +4,11 @@ import "context" // ServiceRemove kills and removes a service. func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + serviceID, err := trimID("service", serviceID) + if err != nil { + return err + } + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) defer ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index d2f03f02f0..ecb98f4684 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -16,7 +16,10 @@ import ( // It should be the value as set *before* the update. You can find this value in the Meta field // of swarm.Service, which can be found using ServiceInspectWithRaw. 
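The inspect-then-update flow described in the comment above might look like the following sketch (not part of the vendored diff; cli is assumed to be a *client.Client, serviceID an existing service on a swarm manager, and the label key is hypothetical; imports: context, github.com/docker/docker/api/types, github.com/docker/docker/client):

    // bumpServiceLabel reads the current service, mutates a label, and writes
    // the spec back with the version number read *before* the update.
    func bumpServiceLabel(ctx context.Context, cli *client.Client, serviceID string) error {
        svc, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
        if err != nil {
            return err
        }
        if svc.Spec.Labels == nil {
            svc.Spec.Labels = map[string]string{}
        }
        svc.Spec.Labels["example.com/revision"] = "2"
        // svc.Version comes from the embedded Meta field, as noted above.
        _, err = cli.ServiceUpdate(ctx, serviceID, svc.Version, svc.Spec, types.ServiceUpdateOptions{})
        return err
    }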
func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) { - response := swarm.ServiceUpdateResponse{} + serviceID, err := trimID("service", serviceID) + if err != nil { + return swarm.ServiceUpdateResponse{}, err + } // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. @@ -24,7 +27,7 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version // Normally, version-negotiation (if enabled) would not happen until // the API request is made. if err := cli.checkVersion(ctx); err != nil { - return response, err + return swarm.ServiceUpdateResponse{}, err } query := url.Values{} @@ -39,7 +42,7 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version query.Set("version", version.String()) if err := validateServiceSpec(service); err != nil { - return response, err + return swarm.ServiceUpdateResponse{}, err } // ensure that the image is tagged @@ -74,10 +77,11 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) defer ensureReaderClosed(resp) if err != nil { - return response, err + return swarm.ServiceUpdateResponse{}, err } - err = json.NewDecoder(resp.body).Decode(&response) + var response swarm.ServiceUpdateResponse + err = json.NewDecoder(resp.Body).Decode(&response) if resolveWarning != "" { response.Warnings = append(response.Warnings, resolveWarning) } diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go index 19f59dd582..271fc08c95 100644 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -9,13 +9,13 @@ import ( // SwarmGetUnlockKey retrieves the swarm's unlock key. func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { - serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(resp) if err != nil { return types.SwarmUnlockKeyResponse{}, err } var response types.SwarmUnlockKeyResponse - err = json.NewDecoder(serverResp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go index da3c1637ef..3dcb2a5b52 100644 --- a/vendor/github.com/docker/docker/client/swarm_init.go +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -9,13 +9,13 @@ import ( // SwarmInit initializes the swarm. 
func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { - serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(resp) if err != nil { return "", err } var response string - err = json.NewDecoder(serverResp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go index b52b67a884..3d5a8a042e 100644 --- a/vendor/github.com/docker/docker/client/swarm_inspect.go +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -9,13 +9,13 @@ import ( // SwarmInspect inspects the swarm. func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { - serverResp, err := cli.get(ctx, "/swarm", nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(resp) if err != nil { return swarm.Swarm{}, err } var response swarm.Swarm - err = json.NewDecoder(serverResp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go index d2412f7d44..745d64d5ba 100644 --- a/vendor/github.com/docker/docker/client/swarm_unlock.go +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -8,7 +8,7 @@ import ( // SwarmUnlock unlocks a locked swarm. func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { - serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) - ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + ensureReaderClosed(resp) return err } diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go index dde1f6c59d..37668bd278 100644 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -11,16 +11,18 @@ import ( // TaskInspectWithRaw returns the task information and its raw representation.
func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { - if taskID == "" { - return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} + taskID, err := trimID("task", taskID) + if err != nil { + return swarm.Task{}, nil, err } - serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) - defer ensureReaderClosed(serverResp) + + resp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + defer ensureReaderClosed(resp) if err != nil { return swarm.Task{}, nil, err } - body, err := io.ReadAll(serverResp.body) + body, err := io.ReadAll(resp.Body) if err != nil { return swarm.Task{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go index 4869b44493..aba7f61e65 100644 --- a/vendor/github.com/docker/docker/client/task_list.go +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -30,6 +30,6 @@ func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) } var tasks []swarm.Task - err = json.NewDecoder(resp.body).Decode(&tasks) + err = json.NewDecoder(resp.Body).Decode(&tasks) return tasks, err } diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go index b8c20e71da..9dcb977b3c 100644 --- a/vendor/github.com/docker/docker/client/task_logs.go +++ b/vendor/github.com/docker/docker/client/task_logs.go @@ -47,5 +47,5 @@ func (cli *Client) TaskLogs(ctx context.Context, taskID string, options containe if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go index 7f3ff44eb8..925d4d8d38 100644 --- a/vendor/github.com/docker/docker/client/utils.go +++ b/vendor/github.com/docker/docker/client/utils.go @@ -1,13 +1,35 @@ package client // import "github.com/docker/docker/client" import ( + "encoding/json" + "fmt" "net/url" - "regexp" + "strings" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/internal/lazyregexp" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) -var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) +var headerRegexp = lazyregexp.New(`\ADocker/.+\s\((.+)\)\z`) + +type emptyIDError string + +func (e emptyIDError) InvalidParameter() {} + +func (e emptyIDError) Error() string { + return "invalid " + string(e) + " name or ID: value is empty" +} + +// trimID trims the given object-ID / name, returning an error if it's empty. +func trimID(objType, id string) (string, error) { + id = strings.TrimSpace(id) + if len(id) == 0 { + return "", emptyIDError(objType) + } + return id, nil +} // getDockerOS returns the operating system based on the server header from the daemon. func getDockerOS(serverHeader string) string { @@ -32,3 +54,43 @@ func getFiltersQuery(f filters.Args) (url.Values, error) { } return query, nil } + +// encodePlatforms marshals the given platform(s) to JSON format, to +// be used for query-parameters for filtering / selecting platforms. 
+func encodePlatforms(platform ...ocispec.Platform) ([]string, error) { + if len(platform) == 0 { + return []string{}, nil + } + if len(platform) == 1 { + p, err := encodePlatform(&platform[0]) + if err != nil { + return nil, err + } + return []string{p}, nil + } + + seen := make(map[string]struct{}, len(platform)) + out := make([]string, 0, len(platform)) + for i := range platform { + p, err := encodePlatform(&platform[i]) + if err != nil { + return nil, err + } + if _, ok := seen[p]; !ok { + out = append(out, p) + seen[p] = struct{}{} + } + } + return out, nil +} + +// encodePlatform marshals the given platform to JSON format, to +// be used for query-parameters for filtering / selecting platforms. It +// is used as a helper for encodePlatforms. +func encodePlatform(platform *ocispec.Platform) (string, error) { + p, err := json.Marshal(platform) + if err != nil { + return "", errdefs.InvalidParameter(fmt.Errorf("invalid platform: %v", err)) + } + return string(p), nil +}
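For reference (not part of the vendored diff), the JSON string these helpers produce for a platform query parameter can be reproduced with the same json.Marshal call; the field names come from the OCI image-spec Platform struct, and the platform values here are just an example:

    package main

    import (
        "encoding/json"
        "fmt"

        ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
        p, err := json.Marshal(ocispec.Platform{OS: "linux", Architecture: "arm64", Variant: "v8"})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(p)) // {"architecture":"arm64","os":"linux","variant":"v8"}
    }

diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go index 8f17ff4e87..4566fd98e5 100644 --- a/vendor/github.com/docker/docker/client/version.go +++ b/vendor/github.com/docker/docker/client/version.go @@ -16,6 +16,6 @@ func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { } var server types.Version - err = json.NewDecoder(resp.body).Decode(&server) + err = json.NewDecoder(resp.Body).Decode(&server) return server, err } diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go index b3b182437b..bedb3abbb7 100644 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -9,12 +9,13 @@ import ( // VolumeCreate creates a volume in the docker host.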
func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) { - var vol volume.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) defer ensureReaderClosed(resp) if err != nil { - return vol, err + return volume.Volume{}, err } - err = json.NewDecoder(resp.body).Decode(&vol) + + var vol volume.Volume + err = json.NewDecoder(resp.Body).Decode(&vol) return vol, err } diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go index b3ba4e6046..ce32bbbb7f 100644 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -17,21 +17,23 @@ func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.V // VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) { - if volumeID == "" { - return volume.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + volumeID, err := trimID("volume", volumeID) + if err != nil { + return volume.Volume{}, nil, err } - var vol volume.Volume resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) defer ensureReaderClosed(resp) if err != nil { - return vol, nil, err + return volume.Volume{}, nil, err } - body, err := io.ReadAll(resp.body) + body, err := io.ReadAll(resp.Body) if err != nil { - return vol, nil, err + return volume.Volume{}, nil, err } + + var vol volume.Volume rdr := bytes.NewReader(body) err = json.NewDecoder(rdr).Decode(&vol) return vol, body, err diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index d5ea9827c7..de6ce23a45 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -11,23 +11,23 @@ import ( // VolumeList returns the volumes configured in the docker host. 
func (cli *Client) VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) { - var volumes volume.ListResponse query := url.Values{} if options.Filters.Len() > 0 { //nolint:staticcheck // ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) if err != nil { - return volumes, err + return volume.ListResponse{}, err } query.Set("filters", filterJSON) } resp, err := cli.get(ctx, "/volumes", query, nil) defer ensureReaderClosed(resp) if err != nil { - return volumes, err + return volume.ListResponse{}, err } - err = json.NewDecoder(resp.body).Decode(&volumes) + var volumes volume.ListResponse + err = json.NewDecoder(resp.Body).Decode(&volumes) return volumes, err } diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go index 9b09c30fa6..7da148feac 100644 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -11,25 +11,24 @@ import ( // VolumesPrune requests the daemon to delete unused data func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (volume.PruneReport, error) { - var report volume.PruneReport - if err := cli.NewVersionError(ctx, "1.25", "volume prune"); err != nil { - return report, err + return volume.PruneReport{}, err } query, err := getFiltersQuery(pruneFilters) if err != nil { - return report, err + return volume.PruneReport{}, err } - serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + defer ensureReaderClosed(resp) if err != nil { - return report, err + return volume.PruneReport{}, err } - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving volume prune report: %v", err) + var report volume.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return volume.PruneReport{}, fmt.Errorf("Error retrieving volume prune report: %v", err) } return report, nil diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index b8bdc5ae85..eefd9ce437 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -9,6 +9,11 @@ import ( // VolumeRemove removes a volume from the docker host. func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + volumeID, err := trimID("volume", volumeID) + if err != nil { + return err + } + query := url.Values{} if force { // Make sure we negotiated (if the client is configured to do so), diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go index 151863f07a..c91d5e984e 100644 --- a/vendor/github.com/docker/docker/client/volume_update.go +++ b/vendor/github.com/docker/docker/client/volume_update.go @@ -11,6 +11,10 @@ import ( // VolumeUpdate updates a volume. This only works for Cluster Volumes, and // only some fields can be updated. 
func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { + volumeID, err := trimID("volume", volumeID) + if err != nil { + return err + } if err := cli.NewVersionError(ctx, "1.42", "volume update"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go index 042de4b7b8..ab76e62736 100644 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -14,7 +14,9 @@ func (e errNotFound) Unwrap() error { return e.error } -// NotFound is a helper to create an error of the class with the same name from any error type +// NotFound creates an [ErrNotFound] error from the given error. +// It returns the error as-is if it is either nil (no error) or already implements +// [ErrNotFound]. func NotFound(err error) error { if err == nil || IsNotFound(err) { return err } @@ -34,7 +36,9 @@ func (e errInvalidParameter) Unwrap() error { return e.error } -// InvalidParameter is a helper to create an error of the class with the same name from any error type +// InvalidParameter creates an [ErrInvalidParameter] error from the given error. +// It returns the error as-is if it is either nil (no error) or already implements +// [ErrInvalidParameter]. func InvalidParameter(err error) error { if err == nil || IsInvalidParameter(err) { return err } @@ -54,7 +58,9 @@ func (e errConflict) Unwrap() error { return e.error } -// Conflict is a helper to create an error of the class with the same name from any error type +// Conflict creates an [ErrConflict] error from the given error. +// It returns the error as-is if it is either nil (no error) or already implements +// [ErrConflict]. func Conflict(err error) error { if err == nil || IsConflict(err) { return err } @@ -74,7 +80,9 @@ func (e errUnauthorized) Unwrap() error { return e.error } -// Unauthorized is a helper to create an error of the class with the same name from any error type +// Unauthorized creates an [ErrUnauthorized] error from the given error. +// It returns the error as-is if it is either nil (no error) or already implements +// [ErrUnauthorized]. func Unauthorized(err error) error { if err == nil || IsUnauthorized(err) { return err } @@ -94,7 +102,9 @@ func (e errUnavailable) Unwrap() error { return e.error } -// Unavailable is a helper to create an error of the class with the same name from any error type +// Unavailable creates an [ErrUnavailable] error from the given error. +// It returns the error as-is if it is either nil (no error) or already implements +// [ErrUnavailable]. func Unavailable(err error) error { if err == nil || IsUnavailable(err) { return err } @@ -114,7 +124,9 @@ func (e errForbidden) Unwrap() error { return e.error } -// Forbidden is a helper to create an error of the class with the same name from any error type +// Forbidden creates an [ErrForbidden] error from the given error. +// It returns the error as-is if it is either nil (no error) or already implements +// [ErrForbidden]. func Forbidden(err error) error { if err == nil || IsForbidden(err) { return err } @@ -134,7 +146,9 @@ func (e errSystem) Unwrap() error { return e.error } -// System is a helper to create an error of the class with the same name from any error type +// System creates an [ErrSystem] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements +// [ErrSystem]. func System(err error) error { if err == nil || IsSystem(err) { return err } @@ -154,7 +168,9 @@ func (e errNotModified) Unwrap() error { return e.error } -// NotModified is a helper to create an error of the class with the same name from any error type +// NotModified creates an [ErrNotModified] error from the given error. +// It returns the error as-is if it is either nil (no error) or already implements +// [ErrNotModified]. func NotModified(err error) error { if err == nil || IsNotModified(err) { return err } @@ -174,7 +190,9 @@ func (e errNotImplemented) Unwrap() error { return e.error } -// NotImplemented is a helper to create an error of the class with the same name from any error type +// NotImplemented creates an [ErrNotImplemented] error from the given error. +// It returns the error as-is if it is either nil (no error) or already implements +// [ErrNotImplemented]. func NotImplemented(err error) error { if err == nil || IsNotImplemented(err) { return err } @@ -194,7 +212,9 @@ func (e errUnknown) Unwrap() error { return e.error } -// Unknown is a helper to create an error of the class with the same name from any error type +// Unknown creates an [ErrUnknown] error from the given error. +// It returns the error as-is if it is either nil (no error) or already implements +// [ErrUnknown]. func Unknown(err error) error { if err == nil || IsUnknown(err) { return err } @@ -214,7 +234,9 @@ func (e errCancelled) Unwrap() error { return e.error } -// Cancelled is a helper to create an error of the class with the same name from any error type +// Cancelled creates an [ErrCancelled] error from the given error. +// It returns the error as-is if it is either nil (no error) or already implements +// [ErrCancelled]. func Cancelled(err error) error { if err == nil || IsCancelled(err) { return err } @@ -234,7 +256,9 @@ func (e errDeadline) Unwrap() error { return e.error } -// Deadline is a helper to create an error of the class with the same name from any error type +// Deadline creates an [ErrDeadline] error from the given error. +// It returns the error as-is if it is either nil (no error) or already implements +// [ErrDeadline]. func Deadline(err error) error { if err == nil || IsDeadline(err) { return err } @@ -254,7 +278,9 @@ func (e errDataLoss) Unwrap() error { return e.error } -// DataLoss is a helper to create an error of the class with the same name from any error type +// DataLoss creates an [ErrDataLoss] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements +// [ErrDataLoss]. func DataLoss(err error) error { if err == nil || IsDataLoss(err) { return err diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go index ebcd789302..0a8fadd48f 100644 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -11,36 +11,37 @@ func FromStatusCode(err error, statusCode int) error { } switch statusCode { case http.StatusNotFound: - err = NotFound(err) + return NotFound(err) case http.StatusBadRequest: - err = InvalidParameter(err) + return InvalidParameter(err) case http.StatusConflict: - err = Conflict(err) + return Conflict(err) case http.StatusUnauthorized: - err = Unauthorized(err) + return Unauthorized(err) case http.StatusServiceUnavailable: - err = Unavailable(err) + return Unavailable(err) case http.StatusForbidden: - err = Forbidden(err) + return Forbidden(err) case http.StatusNotModified: - err = NotModified(err) + return NotModified(err) case http.StatusNotImplemented: - err = NotImplemented(err) + return NotImplemented(err) case http.StatusInternalServerError: - if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { - err = System(err) + if IsCancelled(err) || IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) { + return err } + return System(err) default: switch { case statusCode >= 200 && statusCode < 400: // it's a client error + return err case statusCode >= 400 && statusCode < 500: - err = InvalidParameter(err) + return InvalidParameter(err) case statusCode >= 500 && statusCode < 600: - err = System(err) + return System(err) default: - err = Unknown(err) + return Unknown(err) } } - return err } diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go index f94034cbd7..30ea7e6fec 100644 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -39,79 +39,79 @@ func getImplementer(err error) error { } } -// IsNotFound returns if the passed in error is an ErrNotFound +// IsNotFound returns if the passed in error is an [ErrNotFound]. func IsNotFound(err error) bool { _, ok := getImplementer(err).(ErrNotFound) return ok } -// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter +// IsInvalidParameter returns if the passed in error is an [ErrInvalidParameter]. func IsInvalidParameter(err error) bool { _, ok := getImplementer(err).(ErrInvalidParameter) return ok } -// IsConflict returns if the passed in error is an ErrConflict +// IsConflict returns if the passed in error is an [ErrConflict]. func IsConflict(err error) bool { _, ok := getImplementer(err).(ErrConflict) return ok } -// IsUnauthorized returns if the passed in error is an ErrUnauthorized +// IsUnauthorized returns if the passed in error is an [ErrUnauthorized]. func IsUnauthorized(err error) bool { _, ok := getImplementer(err).(ErrUnauthorized) return ok } -// IsUnavailable returns if the passed in error is an ErrUnavailable +// IsUnavailable returns if the passed in error is an [ErrUnavailable]. func IsUnavailable(err error) bool { _, ok := getImplementer(err).(ErrUnavailable) return ok } -// IsForbidden returns if the passed in error is an ErrForbidden +// IsForbidden returns if the passed in error is an [ErrForbidden].
func IsForbidden(err error) bool { _, ok := getImplementer(err).(ErrForbidden) return ok } -// IsSystem returns if the passed in error is an ErrSystem +// IsSystem returns if the passed in error is an [ErrSystem]. func IsSystem(err error) bool { _, ok := getImplementer(err).(ErrSystem) return ok } -// IsNotModified returns if the passed in error is a NotModified error +// IsNotModified returns if the passed in error is an [ErrNotModified]. func IsNotModified(err error) bool { _, ok := getImplementer(err).(ErrNotModified) return ok } -// IsNotImplemented returns if the passed in error is an ErrNotImplemented +// IsNotImplemented returns if the passed in error is an [ErrNotImplemented]. func IsNotImplemented(err error) bool { _, ok := getImplementer(err).(ErrNotImplemented) return ok } -// IsUnknown returns if the passed in error is an ErrUnknown +// IsUnknown returns if the passed in error is an [ErrUnknown]. func IsUnknown(err error) bool { _, ok := getImplementer(err).(ErrUnknown) return ok } -// IsCancelled returns if the passed in error is an ErrCancelled +// IsCancelled returns if the passed in error is an [ErrCancelled]. func IsCancelled(err error) bool { _, ok := getImplementer(err).(ErrCancelled) return ok } -// IsDeadline returns if the passed in error is an ErrDeadline +// IsDeadline returns if the passed in error is an [ErrDeadline]. func IsDeadline(err error) bool { _, ok := getImplementer(err).(ErrDeadline) return ok } -// IsDataLoss returns if the passed in error is an ErrDataLoss +// IsDataLoss returns if the passed in error is an [ErrDataLoss]. func IsDataLoss(err error) bool { _, ok := getImplementer(err).(ErrDataLoss) return ok diff --git a/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go b/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go new file mode 100644 index 0000000000..6334edb60d --- /dev/null +++ b/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go @@ -0,0 +1,90 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code below was largely copied from golang.org/x/mod@v0.22; +// https://github.com/golang/mod/blob/v0.22.0/internal/lazyregexp/lazyre.go +// with some additional methods added. + +// Package lazyregexp is a thin wrapper over regexp, allowing the use of global +// regexp variables without forcing them to be compiled at init. +package lazyregexp + +import ( + "os" + "regexp" + "strings" + "sync" +) + +// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be +// compiled the first time it is needed. 
+type Regexp struct { + str string + once sync.Once + rx *regexp.Regexp +} + +func (r *Regexp) re() *regexp.Regexp { + r.once.Do(r.build) + return r.rx +} + +func (r *Regexp) build() { + r.rx = regexp.MustCompile(r.str) + r.str = "" +} + +func (r *Regexp) FindSubmatch(s []byte) [][]byte { + return r.re().FindSubmatch(s) +} + +func (r *Regexp) FindAllStringSubmatch(s string, n int) [][]string { + return r.re().FindAllStringSubmatch(s, n) +} + +func (r *Regexp) FindStringSubmatch(s string) []string { + return r.re().FindStringSubmatch(s) +} + +func (r *Regexp) FindStringSubmatchIndex(s string) []int { + return r.re().FindStringSubmatchIndex(s) +} + +func (r *Regexp) ReplaceAllString(src, repl string) string { + return r.re().ReplaceAllString(src, repl) +} + +func (r *Regexp) FindString(s string) string { + return r.re().FindString(s) +} + +func (r *Regexp) FindAllString(s string, n int) []string { + return r.re().FindAllString(s, n) +} + +func (r *Regexp) MatchString(s string) bool { + return r.re().MatchString(s) +} + +func (r *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { + return r.re().ReplaceAllStringFunc(src, repl) +} + +func (r *Regexp) SubexpNames() []string { + return r.re().SubexpNames() +} + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +// New creates a new lazy regexp, delaying the compiling work until it is first +// needed. If the code is being run as part of tests, the regexp compiling will +// happen immediately. +func New(str string) *Regexp { + lr := &Regexp{str: str} + if inTest { + // In tests, always compile the regexps early. + lr.re() + } + return lr +} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go index 8f6e0a737a..854e4c3718 100644 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -43,9 +43,9 @@ type stdWriter struct { // It inserts the prefix header before the buffer, // so stdcopy.StdCopy knows where to multiplex the output. // It makes stdWriter to implement io.Writer. -func (w *stdWriter) Write(p []byte) (n int, err error) { +func (w *stdWriter) Write(p []byte) (int, error) { if w == nil || w.Writer == nil { - return 0, errors.New("Writer not instantiated") + return 0, errors.New("writer not instantiated") } if p == nil { return 0, nil @@ -57,7 +57,7 @@ func (w *stdWriter) Write(p []byte) (n int, err error) { buf.Write(header[:]) buf.Write(p) - n, err = w.Writer.Write(buf.Bytes()) + n, err := w.Writer.Write(buf.Bytes()) n -= stdWriterPrefixLen if n < 0 { n = 0 @@ -65,7 +65,7 @@ func (w *stdWriter) Write(p []byte) (n int, err error) { buf.Reset() bufPool.Put(buf) - return + return n, err } // NewStdWriter instantiates a new Writer. 
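The errdefs hunks above all serve one usage pattern: an error is classified once, either directly with a constructor such as NotFound or indirectly via FromStatusCode, and is then tested with the matching Is* predicate; the constructors pass nil and already-classified errors through unchanged, and the wrappers keep the original error reachable through Unwrap. A minimal sketch of that flow, using only the API surface visible in this diff:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/docker/docker/errdefs"
)

func main() {
	base := errors.New("no such container: web-1")

	// FromStatusCode maps an HTTP status onto an error class; a 404 wraps
	// the error so that it implements ErrNotFound.
	err := errdefs.FromStatusCode(base, http.StatusNotFound)
	fmt.Println(errdefs.IsNotFound(err)) // true

	// Constructors are idempotent: wrapping an already-classified error
	// returns it unchanged.
	fmt.Println(errdefs.NotFound(err) == err) // true

	// The wrappers implement Unwrap, so the original error stays reachable
	// through the standard errors package.
	fmt.Println(errors.Is(err, base)) // true
}
```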
diff --git a/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md index 73fe513468..773af218e9 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md +++ b/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md @@ -1,3 +1,72 @@ +## v2.9.0 (2025-11-17) + +* [GH-3508](https://github.com/gophercloud/gophercloud/pull/3508) [v2] Trigger "hold" workflow on merge groups +* [GH-3511](https://github.com/gophercloud/gophercloud/pull/3511) [v2] Closes #2321 - Fix TestRolesCRUD by including DomainID to TestRolesCRUD +* [GH-3513](https://github.com/gophercloud/gophercloud/pull/3513) [v2] build(deps): bump actions/labeler from 5 to 6 +* [GH-3516](https://github.com/gophercloud/gophercloud/pull/3516) [v2] refactor: Trivial fixes +* [GH-3524](https://github.com/gophercloud/gophercloud/pull/3524) [v2] [glance]: Add 'uploading' status +* [GH-3525](https://github.com/gophercloud/gophercloud/pull/3525) [v2] compute: Add host aggregate uuid field +* [GH-3526](https://github.com/gophercloud/gophercloud/pull/3526) [v2] Enable deletion for network and loadbalancer quotas +* [GH-3541](https://github.com/gophercloud/gophercloud/pull/3541) [v2] docs: Document tested releases for acceptance tests +* [GH-3544](https://github.com/gophercloud/gophercloud/pull/3544) [v2] Identity V3: Add Options field to roles. +* [GH-3547](https://github.com/gophercloud/gophercloud/pull/3547) [v2] Add config_drive to server struct +* [GH-3548](https://github.com/gophercloud/gophercloud/pull/3548) [v2] Identity: Add description field to roles +* [GH-3549](https://github.com/gophercloud/gophercloud/pull/3549) [v2] compute: add cpu info topology cells entry +* [GH-3550](https://github.com/gophercloud/gophercloud/pull/3550) [v2] Migrate epoxy jobs to Ubuntu 24.04 (Noble), drop caracal jobs +* [GH-3551](https://github.com/gophercloud/gophercloud/pull/3551) [v2] build(deps): bump github/codeql-action from 3 to 4 +* [GH-3557](https://github.com/gophercloud/gophercloud/pull/3557) [v2] Fix EC2 authentication to work with new Keystone auth requirement +* [GH-3558](https://github.com/gophercloud/gophercloud/pull/3558) [v2] identity/services: add omitempty to the `type` field +* [GH-3559](https://github.com/gophercloud/gophercloud/pull/3559) [v2] fix: handle Nova create image response for microversion 2.45 and above + +## v2.8.0 (2025-08-18) + +* [GH-3348](https://github.com/gophercloud/gophercloud/pull/3348) [v2] [networking] add ExtractRoutersInto func helper to routers +* [GH-3354](https://github.com/gophercloud/gophercloud/pull/3354) [v2] Fix a small typo +* [GH-3358](https://github.com/gophercloud/gophercloud/pull/3358) [v2] tests: fix devstack master branch tests +* [GH-3361](https://github.com/gophercloud/gophercloud/pull/3361) [v2] octavia: fix http_version type to float +* [GH-3362](https://github.com/gophercloud/gophercloud/pull/3362) [v2] tests: fix containerinfra template creation +* [GH-3367](https://github.com/gophercloud/gophercloud/pull/3367) [v2] Use Makefile for CI jobs +* [GH-3375](https://github.com/gophercloud/gophercloud/pull/3375) [v2] core: add missing Builder interfaces +* [GH-3378](https://github.com/gophercloud/gophercloud/pull/3378) [v2] tests: fix failing rabbitmq service +* [GH-3379](https://github.com/gophercloud/gophercloud/pull/3379) [v2] CI: Remove Bobcat +* [GH-3384](https://github.com/gophercloud/gophercloud/pull/3384) [v2] Move master CI jobs to Ubuntu 24.04 +* [GH-3386](https://github.com/gophercloud/gophercloud/pull/3386) [v2] 
tests: Fix TestBGPAgentCRUD +* [GH-3387](https://github.com/gophercloud/gophercloud/pull/3387) [v2] Update the doc of openstack.AuthOptionsFromEnv function +* [GH-3389](https://github.com/gophercloud/gophercloud/pull/3389) [v2] networking: add constants for statuses +* [GH-3391](https://github.com/gophercloud/gophercloud/pull/3391) [v2] CI: Add Epoxy +* [GH-3393](https://github.com/gophercloud/gophercloud/pull/3393) [v2] dns: implement shared zones list +* [GH-3394](https://github.com/gophercloud/gophercloud/pull/3394) [v2] acceptance: Prevent 409 when bulk-creating secgroup rules +* [GH-3396](https://github.com/gophercloud/gophercloud/pull/3396) [v2] identity: add support for string boolean in users' enabled member +* [GH-3397](https://github.com/gophercloud/gophercloud/pull/3397) [v2] Adjust List func to accept a Builder in tenants, routers and security groups packages +* [GH-3399](https://github.com/gophercloud/gophercloud/pull/3399) [v2] blockstorage: add manage-existing and unmanage api call +* [GH-3401](https://github.com/gophercloud/gophercloud/pull/3401) [v2] Added address groups to Networking extensions, with tests. +* [GH-3407](https://github.com/gophercloud/gophercloud/pull/3407) [v2] neutron: add segment_id support to subnets +* [GH-3413](https://github.com/gophercloud/gophercloud/pull/3413) [v2] build(deps): bump joelanford/go-apidiff from 0.8.2 to 0.8.3 +* [GH-3416](https://github.com/gophercloud/gophercloud/pull/3416) [v2] tests: bump devstack-action +* [GH-3422](https://github.com/gophercloud/gophercloud/pull/3422) [v2] Fix documentation for gateway_ip in subnet update +* [GH-3431](https://github.com/gophercloud/gophercloud/pull/3431) [v2] Use container-infra for OpenStack-API-Version +* [GH-3433](https://github.com/gophercloud/gophercloud/pull/3433) [v2] make: Use fixed version of gotestsum +* [GH-3434](https://github.com/gophercloud/gophercloud/pull/3434) [v2] Randomize test order for unit tests +* [GH-3435](https://github.com/gophercloud/gophercloud/pull/3435) [v2] Add versioned endpoint discovery +* [GH-3438](https://github.com/gophercloud/gophercloud/pull/3438) [v2] dns: add support for /v2/quotas +* [GH-3439](https://github.com/gophercloud/gophercloud/pull/3439) [v2] neutron: add segments extension package +* [GH-3446](https://github.com/gophercloud/gophercloud/pull/3446) nova: add support for hostname updates +* [GH-3452](https://github.com/gophercloud/gophercloud/pull/3452) [v2] neutron: allow omission of subnet_id for IP address +* [GH-3454](https://github.com/gophercloud/gophercloud/pull/3454) [v2] blockstorage: add isPublic query option for volume types +* [GH-3458](https://github.com/gophercloud/gophercloud/pull/3458) [v2] Fix pagination for messaging client +* [GH-3465](https://github.com/gophercloud/gophercloud/pull/3465) [v2] tests: Fix TestVLANTransparentCRUD test +* [GH-3466](https://github.com/gophercloud/gophercloud/pull/3466) [v2] tests: fix tests for recent PR backports +* [GH-3469](https://github.com/gophercloud/gophercloud/pull/3469) [v2] tests: shorten GH-A job names +* [GH-3473](https://github.com/gophercloud/gophercloud/pull/3473) [v2] core: clone service type aliases instead of referencing global slice +* [GH-3475](https://github.com/gophercloud/gophercloud/pull/3475) [v2] Implement update & delete traits on resource provider +* [GH-3476](https://github.com/gophercloud/gophercloud/pull/3476) [v2] tests: fix volumetypes unit tests +* [GH-3477](https://github.com/gophercloud/gophercloud/pull/3477) [v2] script: Improve getenvvar helper +* 
[GH-3481](https://github.com/gophercloud/gophercloud/pull/3481) [v2] Implement hypervisors.GetExt: Get with Query parameter +* [GH-3487](https://github.com/gophercloud/gophercloud/pull/3487) [v2] Add networking taas tapmirror suite +* [GH-3489](https://github.com/gophercloud/gophercloud/pull/3489) [v2] Fix incorrect ICMP field description in PortRangeMax comment +* [GH-3494](https://github.com/gophercloud/gophercloud/pull/3494) [v2] Networking v2: Support two time formats for subnet, router, SG rule (#3492) +* [GH-3495](https://github.com/gophercloud/gophercloud/pull/3495) [v2] build(deps): bump actions/checkout from 4 to 5 + ## v2.7.0 (2025-04-03) * [GH-3306](https://github.com/gophercloud/gophercloud/pull/3306) [v2] identity: Add Get endpoint by ID diff --git a/vendor/github.com/gophercloud/gophercloud/v2/Makefile b/vendor/github.com/gophercloud/gophercloud/v2/Makefile index 2a0618a6b6..c63adb8d03 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/Makefile +++ b/vendor/github.com/gophercloud/gophercloud/v2/Makefile @@ -1,7 +1,9 @@ undefine GOFLAGS GOLANGCI_LINT_VERSION?=v1.62.2 -GO_TEST?=go run gotest.tools/gotestsum@latest --format testname -- +GOTESTSUM_VERSION?=v1.12.2 +GO_TEST?=go run gotest.tools/gotestsum@$(GOTESTSUM_VERSION) --format testname -- +TIMEOUT := "60m" ifeq ($(shell command -v podman 2> /dev/null),) RUNNER=docker @@ -9,15 +11,18 @@ else RUNNER=podman endif -# if the golangci-lint steps fails with the following error message: +# if the golangci-lint steps fails with one of the following error messages: # # directory prefix . does not contain main module or its selected dependencies # +# failed to initialize build cache at /root/.cache/golangci-lint: mkdir /root/.cache/golangci-lint: permission denied +# # you probably have to fix the SELinux security context for root directory plus your cache # # chcon -Rt svirt_sandbox_file_t . # chcon -Rt svirt_sandbox_file_t ~/.cache/golangci-lint lint: + mkdir -p ~/.cache/golangci-lint/$(GOLANGCI_LINT_VERSION) $(RUNNER) run -t --rm \ -v $(shell pwd):/app \ -v ~/.cache/golangci-lint/$(GOLANGCI_LINT_VERSION):/root/.cache \ @@ -31,84 +36,88 @@ format: .PHONY: format unit: - $(GO_TEST) ./... + $(GO_TEST) -shuffle on ./... .PHONY: unit coverage: - $(GO_TEST) -covermode count -coverprofile cover.out -coverpkg=./... ./... + $(GO_TEST) -shuffle on -covermode count -coverprofile cover.out -coverpkg=./... ./... .PHONY: coverage -acceptance: acceptance-baremetal acceptance-blockstorage acceptance-compute acceptance-container acceptance-containerinfra acceptance-db acceptance-dns acceptance-identity acceptance-imageservice acceptance-keymanager acceptance-loadbalancer acceptance-messaging acceptance-networking acceptance-objectstorage acceptance-orchestration acceptance-placement acceptance-sharedfilesystems acceptance-workflow +acceptance: acceptance-basic acceptance-baremetal acceptance-blockstorage acceptance-compute acceptance-container acceptance-containerinfra acceptance-db acceptance-dns acceptance-identity acceptance-image acceptance-keymanager acceptance-loadbalancer acceptance-messaging acceptance-networking acceptance-objectstorage acceptance-orchestration acceptance-placement acceptance-sharedfilesystems acceptance-workflow .PHONY: acceptance +acceptance-basic: + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack +.PHONY: acceptance-basic + acceptance-baremetal: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/baremetal/... 
+ $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/baremetal/... .PHONY: acceptance-baremetal acceptance-blockstorage: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/blockstorage/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/blockstorage/... .PHONY: acceptance-blockstorage acceptance-compute: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/compute/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/compute/... .PHONY: acceptance-compute acceptance-container: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/container/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/container/... .PHONY: acceptance-container acceptance-containerinfra: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/containerinfra/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/containerinfra/... .PHONY: acceptance-containerinfra acceptance-db: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/db/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/db/... .PHONY: acceptance-db acceptance-dns: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/dns/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/dns/... .PHONY: acceptance-dns acceptance-identity: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/identity/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/identity/... .PHONY: acceptance-identity acceptance-image: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/imageservice/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/image/... .PHONY: acceptance-image acceptance-keymanager: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/keymanager/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/keymanager/... .PHONY: acceptance-keymanager acceptance-loadbalancer: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/loadbalancer/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/loadbalancer/... .PHONY: acceptance-loadbalancer acceptance-messaging: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/messaging/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/messaging/... .PHONY: acceptance-messaging acceptance-networking: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/networking/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/networking/... .PHONY: acceptance-networking acceptance-objectstorage: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/objectstorage/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/objectstorage/... .PHONY: acceptance-objectstorage acceptance-orchestration: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/orchestration/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/orchestration/... 
.PHONY: acceptance-orchestration acceptance-placement: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/placement/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/placement/... .PHONY: acceptance-placement acceptance-sharedfilesystems: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/sharedfilesystems/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/sharedfilesystems/... .PHONY: acceptance-sharefilesystems acceptance-workflow: - $(GO_TEST) -tags "fixtures acceptance" ./internal/acceptance/openstack/workflow/... + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/workflow/... .PHONY: acceptance-workflow diff --git a/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go b/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go index 8818e769b8..34d76a1b8d 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go @@ -79,6 +79,11 @@ type EndpointOpts struct { // Required only for services that span multiple regions. Region string + // Version [optional] is the major version of the service required. It is not + // a microversion. Use this to ensure the correct endpoint is selected when + // multiple API versions are available. + Version int + // Availability [optional] is the visibility of the endpoint to be returned. // Valid types include the constants AvailabilityPublic, AvailabilityInternal, // or AvailabilityAdmin from this package. @@ -111,7 +116,7 @@ func (eo *EndpointOpts) ApplyDefaults(t string) { if len(eo.Aliases) == 0 { if aliases, ok := ServiceTypeAliases[eo.Type]; ok { // happy path: user requested a service type by its official name - eo.Aliases = aliases + eo.Aliases = slices.Clone(aliases) } else { // unhappy path: user requested a service type by its alias or an // invalid/unsupported service type @@ -121,7 +126,7 @@ func (eo *EndpointOpts) ApplyDefaults(t string) { // we intentionally override the service type, even if it // was explicitly requested by the user eo.Type = t - eo.Aliases = aliases + eo.Aliases = slices.Clone(aliases) } } } diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/auth_env.go index 893787b787..9ecc5b4efe 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/auth_env.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/auth_env.go @@ -24,8 +24,8 @@ OS_PROJECT_NAME and the latter are expected against a v3 auth api. If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will still be referred as "tenant" in Gophercloud. -If OS_PROJECT_NAME is set, it requires OS_PROJECT_ID to be set as well to -handle projects not on the default domain. +If OS_PROJECT_NAME is set, it requires OS_DOMAIN_ID or OS_DOMAIN_NAME to be +set as well to handle projects not on the default domain.
To use this function, first set the OS_* environment variables (for example, by sourcing an `openrc` file), then: diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/doc.go index 2ab4af93ee..e018b57a8d 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/doc.go @@ -157,5 +157,12 @@ Example of Attaching a Volume to an Instance if err != nil { panic(err) } + +Example of Unmanaging a Volume + + err := volumes.Unmanage(context.TODO(), client, volume.ID).ExtractErr() + if err != nil { + panic(err) + } */ package volumes diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/requests.go index 77210943b5..1026d1ecaa 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/requests.go @@ -623,6 +623,12 @@ func SetImageMetadata(ctx context.Context, client *gophercloud.ServiceClient, id return } +// BootableOptsBuilder allows extensions to add additional parameters to the +// SetBootable request. +type BootableOptsBuilder interface { + ToBootableMap() (map[string]any, error) +} + // BootableOpts contains options for setting bootable status to a volume. type BootableOpts struct { // Enables or disables the bootable attribute. You can boot an instance from a bootable volume. @@ -636,7 +642,7 @@ func (opts BootableOpts) ToBootableMap() (map[string]any, error) { } // SetBootable will set bootable status on a volume based on the values in BootableOpts -func SetBootable(ctx context.Context, client *gophercloud.ServiceClient, id string, opts BootableOpts) (r SetBootableResult) { +func SetBootable(ctx context.Context, client *gophercloud.ServiceClient, id string, opts BootableOptsBuilder) (r SetBootableResult) { b, err := opts.ToBootableMap() if err != nil { r.Err = err @@ -697,6 +703,12 @@ func ChangeType(ctx context.Context, client *gophercloud.ServiceClient, id strin return } +// ReImageOptsBuilder allows extensions to add additional parameters to the +// ReImage request. +type ReImageOptsBuilder interface { + ToReImageMap() (map[string]any, error) +} + // ReImageOpts contains options for Re-image a volume. type ReImageOpts struct { // New image id @@ -711,7 +723,7 @@ func (opts ReImageOpts) ToReImageMap() (map[string]any, error) { } // ReImage will re-image a volume based on the values in ReImageOpts -func ReImage(ctx context.Context, client *gophercloud.ServiceClient, id string, opts ReImageOpts) (r ReImageResult) { +func ReImage(ctx context.Context, client *gophercloud.ServiceClient, id string, opts ReImageOptsBuilder) (r ReImageResult) { b, err := opts.ToReImageMap() if err != nil { r.Err = err @@ -763,3 +775,14 @@ func ResetStatus(ctx context.Context, client *gophercloud.ServiceClient, id stri _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) return } + +// Unmanage removes a volume from Block Storage management without +// removing the back-end storage object that is associated with it. 
+func Unmanage(ctx context.Context, client *gophercloud.ServiceClient, id string) (r UnmanageResult) { + body := map[string]any{"os-unmanage": make(map[string]any)} + resp, err := client.Post(ctx, actionURL(client, id), body, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/results.go index 3f184b398e..e99ef5e197 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes/results.go @@ -399,3 +399,8 @@ type ReImageResult struct { type ResetStatusResult struct { gophercloud.ErrResult } + +// UnmanageResult contains the response error from an Unmanage request. +type UnmanageResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go index 122a3ee699..73ca5c56d5 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go @@ -2,6 +2,7 @@ package openstack import ( "context" + "errors" "fmt" "reflect" "strings" @@ -162,7 +163,7 @@ } } client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V2EndpointURL(catalog, opts) + return V2Endpoint(context.TODO(), client, catalog, opts) } return nil @@ -283,7 +284,7 @@ } } client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V3EndpointURL(catalog, opts) + return V3Endpoint(context.TODO(), client, catalog, opts) } return nil @@ -345,13 +346,20 @@ func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOp } // TODO(stephenfin): Allow passing aliases to all New${SERVICE}V${VERSION} methods in v3 -func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) { +func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string, version int) (*gophercloud.ServiceClient, error) { sc := new(gophercloud.ServiceClient) + eo.ApplyDefaults(clientType) + if eo.Version != 0 && eo.Version != version { + return sc, errors.New("conflict between requested service major version and manually set version") + } + eo.Version = version + url, err := client.EndpointLocator(eo) if err != nil { return sc, err } + sc.ProviderClient = client sc.Endpoint = url sc.Type = clientType @@ -361,7 +369,7 @@ // NewBareMetalV1 creates a ServiceClient that may be used with the v1 // bare metal package.
func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "baremetal") + sc, err := initClientOpts(client, eo, "baremetal", 1) if !strings.HasSuffix(strings.TrimSuffix(sc.Endpoint, "/"), "v1") { sc.ResourceBase = sc.Endpoint + "v1/" } @@ -371,25 +379,25 @@ func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointO // NewBareMetalIntrospectionV1 creates a ServiceClient that may be used with the v1 // bare metal introspection package. func NewBareMetalIntrospectionV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "baremetal-introspection") + return initClientOpts(client, eo, "baremetal-introspection", 1) } // NewObjectStorageV1 creates a ServiceClient that may be used with the v1 // object storage package. func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "object-store") + return initClientOpts(client, eo, "object-store", 1) } // NewComputeV2 creates a ServiceClient that may be used with the v2 compute // package. func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "compute") + return initClientOpts(client, eo, "compute", 2) } // NewNetworkV2 creates a ServiceClient that may be used with the v2 network // package. func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "network") + sc, err := initClientOpts(client, eo, "network", 2) sc.ResourceBase = sc.Endpoint + "v2.0/" return sc, err } @@ -398,40 +406,40 @@ func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpt // NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 // block storage service. func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volume") + return initClientOpts(client, eo, "volume", 1) } // NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 // block storage service. func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "block-storage") + return initClientOpts(client, eo, "block-storage", 2) } // NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "block-storage") + return initClientOpts(client, eo, "block-storage", 3) } // NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "shared-file-system") + return initClientOpts(client, eo, "shared-file-system", 2) } // NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 // orchestration service. 
func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "orchestration") + return initClientOpts(client, eo, "orchestration", 1) } // NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "database") + return initClientOpts(client, eo, "database", 1) } // NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS // service. func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "dns") + sc, err := initClientOpts(client, eo, "dns", 2) sc.ResourceBase = sc.Endpoint + "v2/" return sc, err } @@ -439,7 +447,7 @@ func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) ( // NewImageV2 creates a ServiceClient that may be used to access the v2 image // service. func NewImageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "image") + sc, err := initClientOpts(client, eo, "image", 2) sc.ResourceBase = sc.Endpoint + "v2/" return sc, err } @@ -447,7 +455,7 @@ func NewImageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) // NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2 // load balancer service. func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "load-balancer") + sc, err := initClientOpts(client, eo, "load-balancer", 2) // Fixes edge case having an OpenStack lb endpoint with trailing version number. endpoint := strings.Replace(sc.Endpoint, "v2.0/", "", -1) @@ -459,20 +467,20 @@ func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.Endpoi // NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging // service. func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "message") + sc, err := initClientOpts(client, eo, "message", 2) sc.MoreHeaders = map[string]string{"Client-ID": clientID} return sc, err } // NewContainerV1 creates a ServiceClient that may be used with v1 container package func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "application-container") + return initClientOpts(client, eo, "application-container", 1) } // NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key // manager service. func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "key-manager") + sc, err := initClientOpts(client, eo, "key-manager", 1) sc.ResourceBase = sc.Endpoint + "v1/" return sc, err } @@ -480,15 +488,15 @@ func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.Endpoint // NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management // package. 
func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "container-infrastructure-management") + return initClientOpts(client, eo, "container-infrastructure-management", 1) } // NewWorkflowV2 creates a ServiceClient that may be used with the v2 workflow management package. func NewWorkflowV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "workflow") + return initClientOpts(client, eo, "workflow", 2) } // NewPlacementV1 creates a ServiceClient that may be used with the placement package. func NewPlacementV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "placement") + return initClientOpts(client, eo, "placement", 1) } diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go index 44e8cccaeb..c0ccebfa49 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go @@ -1,10 +1,12 @@ package servers import ( + "bytes" "context" "encoding/base64" "encoding/json" "fmt" + "io" "maps" "net" "regexp" @@ -651,6 +653,12 @@ type UpdateOpts struct { // AccessIPv6 provides a new IPv6 address for the instance. AccessIPv6 string `json:"accessIPv6,omitempty"` + + // Hostname changes the hostname of the server. + // Requires microversion 2.90 or later. + // Note: This information is published via the metadata service and requires + // application such as cloud-init to propagate it through to the instance. + Hostname *string `json:"hostname,omitempty"` } // ToServerUpdateMap formats an UpdateOpts structure into a request body. @@ -1044,10 +1052,35 @@ func CreateImage(ctx context.Context, client *gophercloud.ServiceClient, id stri r.Err = err return } + resp, err := client.Post(ctx, actionURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, + OkCodes: []int{202}, + KeepResponseBody: true, }) + _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) + if r.Err != nil { + return + } + defer resp.Body.Close() + + if v := r.Header.Get("Content-Type"); v != "application/json" { + return + } + + // The response body is expected to be a small JSON object containing only "image_id". + // Read it fully into memory so the response body can be closed immediately. + // If the caller doesn't read from the buffer, it can still be safely garbage collected. 
+ + var buf bytes.Buffer + + _, r.Err = io.Copy(&buf, resp.Body) + if r.Err != nil { + return + } + + r.Body = &buf + return } diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go index 385001c8dd..edc2740f68 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go @@ -7,9 +7,11 @@ import ( "fmt" "net/url" "path" + "strconv" "time" "github.com/gophercloud/gophercloud/v2" + "github.com/gophercloud/gophercloud/v2/openstack/utils" "github.com/gophercloud/gophercloud/v2/pagination" ) @@ -132,18 +134,49 @@ func (r CreateImageResult) ExtractImageID() (string, error) { if r.Err != nil { return "", r.Err } - // Get the image id from the header + + microversion := r.Header.Get("X-OpenStack-Nova-API-Version") + + major, minor, err := utils.ParseMicroversion(microversion) + if err != nil { + return "", fmt.Errorf("failed to parse X-OpenStack-Nova-API-Version header: %s", err) + } + + // In microversions prior to 2.45, the image ID was provided in the Location header. + if major < 2 || (major == 2 && minor < 45) { + return r.extractImageIDFromLocationHeader() + } + + // Starting from 2.45, it is included in the response body. + return r.extractImageIDFromResponseBody() +} + +func (r CreateImageResult) extractImageIDFromLocationHeader() (string, error) { u, err := url.ParseRequestURI(r.Header.Get("Location")) if err != nil { return "", err } + imageID := path.Base(u.Path) if imageID == "." || imageID == "/" { return "", fmt.Errorf("Failed to parse the ID of newly created image: %s", u) } + return imageID, nil } +func (r CreateImageResult) extractImageIDFromResponseBody() (string, error) { + var response struct { + ImageID string `json:"image_id"` + } + + if err := r.ExtractInto(&response); err != nil { + return "", err + } + + return response.ImageID, nil +} + // Server represents a server/instance in the OpenStack cloud. type Server struct { // ID uniquely identifies this server amongst all other servers, @@ -283,6 +316,9 @@ type Server struct { // Locked indicates the lock status of the server // This requires microversion 2.9 or later Locked *bool `json:"locked"` + + // ConfigDrive enables metadata injection through a configuration drive. 
+ ConfigDrive bool `json:"-"` } type AttachedVolume struct { @@ -343,6 +379,7 @@ func (r *Server) UnmarshalJSON(b []byte) error { Image any `json:"image"` LaunchedAt gophercloud.JSONRFC3339MilliNoZ `json:"OS-SRV-USG:launched_at"` TerminatedAt gophercloud.JSONRFC3339MilliNoZ `json:"OS-SRV-USG:terminated_at"` + ConfigDrive any `json:"config_drive"` } err := json.Unmarshal(b, &s) if err != nil { @@ -364,6 +401,24 @@ func (r *Server) UnmarshalJSON(b []byte) error { r.LaunchedAt = time.Time(s.LaunchedAt) r.TerminatedAt = time.Time(s.TerminatedAt) + switch t := s.ConfigDrive.(type) { + case nil: + r.ConfigDrive = false + case bool: + r.ConfigDrive = t + case string: + if t == "" { + r.ConfigDrive = false + } else { + r.ConfigDrive, err = strconv.ParseBool(t) + if err != nil { + return fmt.Errorf("failed to parse ConfigDrive %q: %v", t, err) + } + } + default: + return fmt.Errorf("unknown type for ConfigDrive: %T (value: %v)", t, t) + } + return err } diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint.go new file mode 100644 index 0000000000..6178434423 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint.go @@ -0,0 +1,190 @@ +package openstack + +import ( + "context" + "regexp" + "slices" + "strconv" + + "github.com/gophercloud/gophercloud/v2" + tokens2 "github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/v2/openstack/identity/v3/tokens" + "github.com/gophercloud/gophercloud/v2/openstack/utils" +) + +var versionedServiceTypeAliasRegexp = regexp.MustCompile(`^.*v(\d)$`) + +func extractServiceTypeVersion(serviceType string) int { + matches := versionedServiceTypeAliasRegexp.FindAllStringSubmatch(serviceType, 1) + if matches != nil { + // the pattern captures a single digit, so the conversion should not fail + ret, err := strconv.Atoi(matches[0][1]) + if err != nil { + return 0 + } + return ret + } + return 0 +} + +func endpointSupportsVersion(ctx context.Context, client *gophercloud.ProviderClient, serviceType, endpointURL string, expectedVersion int) (bool, error) { + // Swift doesn't support version discovery :( + if expectedVersion == 0 || serviceType == "object-store" { + return true, nil + } + + // Repeating verbatim from keystoneauth1 [1]: + // + // > The sins of our fathers become the blood on our hands. + // > If a user requests an old-style service type such as volumev2, then they + // > are inherently requesting the major API version 2. It's not a good + // > interface, but it's the one that was imposed on the world years ago + // > because the client libraries all hid the version discovery document. + // > In order to be able to ensure that a user who requests volumev2 does not + // > get a block-storage endpoint that only provides v3 of the block-storage + // > service, we need to pull the version out of the service_type. The + // > service-types-authority will prevent the growth of new monstrosities such + // > as this, but in order to move forward without breaking people, we have + // > to just cry in the corner while striking ourselves with thorned branches. + // > That said, for sure only do this hack for officially known service_types. + // + // So yeah, what mordred said.
+ // + // https://github.com/openstack/keystoneauth/blob/5.10.0/keystoneauth1/discover.py#L270-L290 + impliedVersion := extractServiceTypeVersion(serviceType) + if impliedVersion != 0 && impliedVersion != expectedVersion { + return false, nil + } + + // NOTE(stephenfin) In addition to the above, keystoneauth also supports a URL + // hack whereby it will extract the version from the URL. We may wish to + // implement this too. + + endpointURL, err := utils.BaseVersionedEndpoint(endpointURL) + if err != nil { + return false, err + } + + supportedVersions, err := utils.GetServiceVersions(ctx, client, endpointURL, false) + if err != nil { + return false, err + } + + for _, supportedVersion := range supportedVersions { + if supportedVersion.Major == expectedVersion { + return true, nil + } + } + + return false, nil +} + +/* +V2Endpoint discovers the endpoint URL for a specific service from a +ServiceCatalog acquired during the v2 identity service. + +The specified EndpointOpts are used to identify a unique, unambiguous endpoint +to return. It's an error when no endpoint matches the provided criteria; if +several match, the first one found is returned. The minimum that can be +specified is a Type, but you +will also often need to specify a Name and/or a Region depending on what's +available on your OpenStack deployment. +*/ +func V2Endpoint(ctx context.Context, client *gophercloud.ProviderClient, catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. + // + // If multiple endpoints are found, we return the first result and disregard the rest. + // This behavior matches the Python library. See GH-1764. + for _, entry := range catalog.Entries { + if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Region != "" && endpoint.Region != opts.Region { + continue + } + + var endpointURL string + switch opts.Availability { + case gophercloud.AvailabilityPublic: + endpointURL = gophercloud.NormalizeURL(endpoint.PublicURL) + case gophercloud.AvailabilityInternal: + endpointURL = gophercloud.NormalizeURL(endpoint.InternalURL) + case gophercloud.AvailabilityAdmin: + endpointURL = gophercloud.NormalizeURL(endpoint.AdminURL) + default: + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + + endpointSupportsVersion, err := endpointSupportsVersion(ctx, client, entry.Type, endpointURL, opts.Version) + if err != nil { + return "", err + } + if !endpointSupportsVersion { + continue + } + + return endpointURL, nil + } + } + } + + // Report an error if there were no matching endpoints. + err := &gophercloud.ErrEndpointNotFound{} + return "", err +} + +/* +V3Endpoint discovers the endpoint URL for a specific service from a Catalog +acquired during the v3 identity service. + +The specified EndpointOpts are used to identify a unique, unambiguous endpoint +to return. It's an error when no endpoint matches the provided criteria; if +several match, the first one found is returned. The minimum that can be +specified is a Type, but you +will also often need to specify a Name and/or a Region depending on what's +available on your OpenStack deployment.
+*/ +func V3Endpoint(ctx context.Context, client *gophercloud.ProviderClient, catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + if opts.Availability != gophercloud.AvailabilityAdmin && + opts.Availability != gophercloud.AvailabilityPublic && + opts.Availability != gophercloud.AvailabilityInternal { + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + + // Extract Endpoints from the catalog entries that match the requested Type, Interface, + // Name if provided, and Region if provided. + // + // If multiple endpoints are found, we return the first result and disregard the rest. + // This behavior matches the Python library. See GH-1764. + for _, entry := range catalog.Entries { + if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Availability != gophercloud.Availability(endpoint.Interface) { + continue + } + if opts.Region != "" && endpoint.Region != opts.Region && endpoint.RegionID != opts.Region { + continue + } + + endpointURL := gophercloud.NormalizeURL(endpoint.URL) + + endpointSupportsVersion, err := endpointSupportsVersion(ctx, client, entry.Type, endpointURL, opts.Version) + if err != nil { + return "", err + } + if !endpointSupportsVersion { + continue + } + + return endpointURL, nil + } + } + } + + // Report an error if there were no matching endpoints. + err := &gophercloud.ErrEndpointNotFound{} + return "", err +} diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go index 14cff0d755..573c1f06f4 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go @@ -8,6 +8,8 @@ import ( tokens3 "github.com/gophercloud/gophercloud/v2/openstack/identity/v3/tokens" ) +// TODO(stephenfin): Remove this module in v3. The functions below are no longer used. + /* V2EndpointURL discovers the endpoint URL for a specific service from a ServiceCatalog acquired during the v2 identity service. @@ -20,39 +22,33 @@ available on your OpenStack deployment. */ func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. - var endpoints = make([]tokens2.Endpoint, 0, 1) + // + // If multiple endpoints are found, we return the first result and disregard the rest. + // This behavior matches the Python library. See GH-1764. for _, entry := range catalog.Entries { if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) { for _, endpoint := range entry.Endpoints { - if opts.Region == "" || endpoint.Region == opts.Region { - endpoints = append(endpoints, endpoint) + if opts.Region != "" && endpoint.Region != opts.Region { + continue } - } - } - } - // If multiple endpoints were found, use the first result - // and disregard the other endpoints. - // - // This behavior matches the Python library. See GH-1764. 
- if len(endpoints) > 1 { - endpoints = endpoints[0:1] - } + var endpointURL string + switch opts.Availability { + case gophercloud.AvailabilityPublic: + endpointURL = gophercloud.NormalizeURL(endpoint.PublicURL) + case gophercloud.AvailabilityInternal: + endpointURL = gophercloud.NormalizeURL(endpoint.InternalURL) + case gophercloud.AvailabilityAdmin: + endpointURL = gophercloud.NormalizeURL(endpoint.AdminURL) + default: + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } - // Extract the appropriate URL from the matching Endpoint. - for _, endpoint := range endpoints { - switch opts.Availability { - case gophercloud.AvailabilityPublic: - return gophercloud.NormalizeURL(endpoint.PublicURL), nil - case gophercloud.AvailabilityInternal: - return gophercloud.NormalizeURL(endpoint.InternalURL), nil - case gophercloud.AvailabilityAdmin: - return gophercloud.NormalizeURL(endpoint.AdminURL), nil - default: - err := &ErrInvalidAvailabilityProvided{} - err.Argument = "Availability" - err.Value = opts.Availability - return "", err + return endpointURL, nil + } } } @@ -72,41 +68,35 @@ will also often need to specify a Name and/or a Region depending on what's available on your OpenStack deployment. */ func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + if opts.Availability != gophercloud.AvailabilityAdmin && + opts.Availability != gophercloud.AvailabilityPublic && + opts.Availability != gophercloud.AvailabilityInternal { + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + // Extract Endpoints from the catalog entries that match the requested Type, Interface, // Name if provided, and Region if provided. - var endpoints = make([]tokens3.Endpoint, 0, 1) + // + // If multiple endpoints are found, we return the first result and disregard the rest. + // This behavior matches the Python library. See GH-1764. for _, entry := range catalog.Entries { if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) { for _, endpoint := range entry.Endpoints { - if opts.Availability != gophercloud.AvailabilityAdmin && - opts.Availability != gophercloud.AvailabilityPublic && - opts.Availability != gophercloud.AvailabilityInternal { - err := &ErrInvalidAvailabilityProvided{} - err.Argument = "Availability" - err.Value = opts.Availability - return "", err + if opts.Availability != gophercloud.Availability(endpoint.Interface) { + continue } - if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && - (opts.Region == "" || endpoint.Region == opts.Region || endpoint.RegionID == opts.Region) { - endpoints = append(endpoints, endpoint) + if opts.Region != "" && endpoint.Region != opts.Region && endpoint.RegionID != opts.Region { + continue } + + return gophercloud.NormalizeURL(endpoint.URL), nil } } } - // If multiple endpoints were found, use the first result - // and disregard the other endpoints. - // - // This behavior matches the Python library. See GH-1764. - if len(endpoints) > 1 { - endpoints = endpoints[0:1] - } - - // Extract the URL from the matching Endpoint. - for _, endpoint := range endpoints { - return gophercloud.NormalizeURL(endpoint.URL), nil - } - // Report an error if there were no matching endpoints. 
err := &gophercloud.ErrEndpointNotFound{} return "", err diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants/requests.go index a08980df2c..84a8b9df1d 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants/requests.go @@ -7,6 +7,12 @@ import ( "github.com/gophercloud/gophercloud/v2/pagination" ) +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToTenantListQuery() (string, error) +} + // ListOpts filters the Tenants that are returned by the List call. type ListOpts struct { // Marker is the ID of the last Tenant on the previous page. @@ -16,15 +22,21 @@ type ListOpts struct { Limit int `q:"limit"` } +// ToTenantListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToTenantListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + // List enumerates the Tenants to which the current token has access. -func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager { +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { url := listURL(client) if opts != nil { - q, err := gophercloud.BuildQueryString(opts) + query, err := opts.ToTenantListQuery() if err != nil { return pagination.Pager{Err: err} } - url += q.String() + url += query } return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { return TenantPage{pagination.LinkedPageBase{PageResult: r}} diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/ec2tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/ec2tokens/requests.go index 5b1f3d6882..1d4cb54928 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/ec2tokens/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/ec2tokens/requests.go @@ -300,8 +300,7 @@ func Create(ctx context.Context, c *gophercloud.ServiceClient, opts tokens.AuthO deleteBodyElements(b, "token") resp, err := c.Post(ctx, ec2tokensURL(c), b, &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: map[string]string{"X-Auth-Token": ""}, - OkCodes: []int{200}, + OkCodes: []int{200}, }) _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) return @@ -320,8 +319,7 @@ func ValidateS3Token(ctx context.Context, c *gophercloud.ServiceClient, opts tok deleteBodyElements(b, "body_hash", "headers", "host", "params", "path", "verb") resp, err := c.Post(ctx, s3tokensURL(c), b, &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: map[string]string{"X-Auth-Token": ""}, - OkCodes: []int{200}, + OkCodes: []int{200}, }) _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) return diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/oauth1/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/oauth1/requests.go index 8c66b36e20..0b23269ffa 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/oauth1/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/identity/v3/oauth1/requests.go @@ -214,6 +214,12 @@ func GetConsumer(ctx context.Context, client *gophercloud.ServiceClient, id stri return } +// UpdateConsumerOptsBuilder allows extensions to add additional 
parameters to the +// UpdateConsumer request. +type UpdateConsumerOptsBuilder interface { + ToOAuth1UpdateConsumerMap() (map[string]any, error) +} + // UpdateConsumerOpts provides options used to update a consumer. type UpdateConsumerOpts struct { // Description is the consumer description. @@ -227,7 +233,7 @@ func (opts UpdateConsumerOpts) ToOAuth1UpdateConsumerMap() (map[string]any, erro } // UpdateConsumer updates an existing Consumer. -func UpdateConsumer(ctx context.Context, client *gophercloud.ServiceClient, id string, opts UpdateConsumerOpts) (r UpdateConsumerResult) { +func UpdateConsumer(ctx context.Context, client *gophercloud.ServiceClient, id string, opts UpdateConsumerOptsBuilder) (r UpdateConsumerResult) { b, err := opts.ToOAuth1UpdateConsumerMap() if err != nil { r.Err = err diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/image/v2/images/types.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/image/v2/images/types.go index 147be19927..eedc13a330 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/image/v2/images/types.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/image/v2/images/types.go @@ -13,10 +13,14 @@ const ( // been reserved for an image in the image registry. ImageStatusQueued ImageStatus = "queued" - // ImageStatusSaving denotes that an image’s raw data is currently being + // ImageStatusSaving denotes that an image's raw data is currently being // uploaded to Glance ImageStatusSaving ImageStatus = "saving" + // ImageStatusUploading denotes that an image's raw data is currently being + // uploaded to Glance through the upload process + ImageStatusUploading ImageStatus = "uploading" + // ImageStatusActive denotes an image that is fully available in Glance. ImageStatusActive ImageStatus = "active" diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/flavors/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/flavors/requests.go index 710a6edf5b..67196a5202 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/flavors/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/flavors/requests.go @@ -127,7 +127,7 @@ func (opts UpdateOpts) ToFlavorUpdateMap() (map[string]any, error) { // Update is an operation which modifies the attributes of the specified // Flavor. -func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { +func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { b, err := opts.ToFlavorUpdateMap() if err != nil { r.Err = err diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/l7policies/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/l7policies/requests.go index 62a4f179ee..ab0b22c6bc 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/l7policies/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/l7policies/requests.go @@ -263,6 +263,12 @@ func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts U return } +// CreateRuleOptsBuilder allows extensions to add additional parameters to the +// CreateRule request. +type CreateRuleOptsBuilder interface { + ToRuleCreateMap() (map[string]any, error) +} + // CreateRuleOpts is the common options struct used in this package's CreateRule // operation. 
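A recurring change in these hunks is widening function signatures from a concrete Opts struct to an OptsBuilder interface (ToTenantListQuery, ToOAuth1UpdateConsumerMap, and so on), which lets callers substitute their own builder. A minimal sketch against the tenants package; the `audit` query parameter is hypothetical and exists only to show the extension point:

```go
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tenants"
)

// auditedListOpts is a hypothetical custom builder: it wraps the stock
// ListOpts and appends an extra, extension-specific query parameter.
type auditedListOpts struct {
	tenants.ListOpts
}

func (o auditedListOpts) ToTenantListQuery() (string, error) {
	q, err := o.ListOpts.ToTenantListQuery()
	if err != nil {
		return "", err
	}
	// Real code should handle the empty-query case; kept simple here.
	return q + "&audit=true", nil
}

func main() {
	var _ tenants.ListOptsBuilder = auditedListOpts{} // satisfies the new interface

	q, err := auditedListOpts{tenants.ListOpts{Limit: 10}}.ToTenantListQuery()
	if err != nil {
		panic(err)
	}
	fmt.Println(q) // ?limit=10&audit=true
}
```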
type CreateRuleOpts struct { @@ -300,7 +306,7 @@ func (opts CreateRuleOpts) ToRuleCreateMap() (map[string]any, error) { } // CreateRule will create and associate a Rule with a particular L7Policy. -func CreateRule(ctx context.Context, c *gophercloud.ServiceClient, policyID string, opts CreateRuleOpts) (r CreateRuleResult) { +func CreateRule(ctx context.Context, c *gophercloud.ServiceClient, policyID string, opts CreateRuleOptsBuilder) (r CreateRuleResult) { b, err := opts.ToRuleCreateMap() if err != nil { r.Err = err diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners/requests.go index 3216fbddd0..abd5d08970 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners/requests.go @@ -380,7 +380,7 @@ func (opts UpdateOpts) ToListenerUpdateMap() (map[string]any, error) { // Update is an operation which modifies the attributes of the specified // Listener. -func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { +func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { b, err := opts.ToListenerUpdateMap() if err != nil { r.Err = err diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers/requests.go index f815806f39..095170edd3 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers/requests.go @@ -208,7 +208,7 @@ func (opts UpdateOpts) ToLoadBalancerUpdateMap() (map[string]any, error) { // Update is an operation which modifies the attributes of the specified // LoadBalancer. -func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { +func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { b, err := opts.ToLoadBalancerUpdateMap() if err != nil { r.Err = err diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/requests.go index be5701c5f4..15a503badc 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/requests.go @@ -2,6 +2,7 @@ package monitors import ( "context" + "strconv" "github.com/gophercloud/gophercloud/v2" "github.com/gophercloud/gophercloud/v2/pagination" @@ -153,7 +154,25 @@ type CreateOpts struct { // ToMonitorCreateMap builds a request body from CreateOpts. 
func (opts CreateOpts) ToMonitorCreateMap() (map[string]any, error) { - return gophercloud.BuildRequestBody(opts, "healthmonitor") + b, err := gophercloud.BuildRequestBody(opts, "healthmonitor") + if err != nil { + return nil, err + } + + if v, ok := b["healthmonitor"]; ok { + if m, ok := v.(map[string]any); ok { + if v, ok := m["http_version"]; ok { + if v, ok := v.(string); ok { + m["http_version"], err = strconv.ParseFloat(v, 64) + if err != nil { + return nil, err + } + } + } + } + } + + return b, nil } /* @@ -247,7 +266,25 @@ type UpdateOpts struct { // ToMonitorUpdateMap builds a request body from UpdateOpts. func (opts UpdateOpts) ToMonitorUpdateMap() (map[string]any, error) { - return gophercloud.BuildRequestBody(opts, "healthmonitor") + b, err := gophercloud.BuildRequestBody(opts, "healthmonitor") + if err != nil { + return nil, err + } + + if v, ok := b["healthmonitor"]; ok { + if m, ok := v.(map[string]any); ok { + if v, ok := m["http_version"]; ok { + if v, ok := v.(string); ok { + m["http_version"], err = strconv.ParseFloat(v, 64) + if err != nil { + return nil, err + } + } + } + } + } + + return b, nil } // Update is an operation which modifies the attributes of the specified diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/results.go index 644ef18700..6e8563faaa 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors/results.go @@ -1,6 +1,9 @@ package monitors import ( + "encoding/json" + "strconv" + "github.com/gophercloud/gophercloud/v2" "github.com/gophercloud/gophercloud/v2/pagination" ) @@ -61,7 +64,7 @@ type Monitor struct { HTTPMethod string `json:"http_method"` // The HTTP version that the monitor uses for requests. - HTTPVersion string `json:"http_version"` + HTTPVersion string `json:"-"` // The HTTP path of the request sent by the monitor to test the health of a // member. Must be a string beginning with a forward slash (/). @@ -96,6 +99,26 @@ type Monitor struct { Tags []string `json:"tags"` } +func (r *Monitor) UnmarshalJSON(b []byte) error { + type tmp Monitor + var s struct { + tmp + HTTPVersion float64 `json:"http_version"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Monitor(s.tmp) + if s.HTTPVersion != 0 { + r.HTTPVersion = strconv.FormatFloat(s.HTTPVersion, 'f', 1, 64) + } + + return nil +} + // MonitorPage is the page returned by a pager when traversing over a // collection of health monitors. 
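The http_version handling above exists because Octavia transmits the field as a JSON number (1.0 or 1.1) while gophercloud models it as a string: the request builders convert string to float64 on the way out, and Monitor.UnmarshalJSON converts back on the way in. A rough round-trip sketch; the field values are illustrative, and it assumes CreateOpts carries an HTTPVersion string field, as the conversion above implies:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/monitors"
)

func main() {
	// The API returns http_version as a JSON number...
	var m monitors.Monitor
	if err := json.Unmarshal([]byte(`{"name": "web-hm", "http_version": 1.1}`), &m); err != nil {
		panic(err)
	}
	fmt.Println(m.HTTPVersion) // "1.1" — stored as a string on the struct

	// ...while the create options accept it as a string and re-encode it as a number.
	b, err := monitors.CreateOpts{
		PoolID:      "pool-id", // illustrative
		Type:        "HTTP",
		Delay:       5,
		Timeout:     5,
		MaxRetries:  3,
		HTTPVersion: "1.1", // assumed string field, per ToMonitorCreateMap above
	}.ToMonitorCreateMap()
	if err != nil {
		panic(err)
	}
	fmt.Println(b["healthmonitor"].(map[string]any)["http_version"]) // 1.1 (float64)
}
```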
type MonitorPage struct { diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/constants.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/constants.go new file mode 100644 index 0000000000..85dff7818c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/constants.go @@ -0,0 +1,7 @@ +package floatingips + +const ( + StatusActive = "ACTIVE" + StatusDown = "DOWN" + StatusError = "ERROR" +) diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/requests.go index f6ca654841..def4699db3 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/requests.go @@ -8,6 +8,12 @@ import ( "github.com/gophercloud/gophercloud/v2/pagination" ) +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToRouterListQuery() (string, error) +} + // ListOpts allows the filtering and sorting of paginated collections through // the API. Filtering is achieved by passing in struct field values that map to // the floating IP attributes you want to see returned. SortKey allows you to @@ -33,19 +39,31 @@ type ListOpts struct { RevisionNumber *int `q:"revision_number"` } +// ToRouterListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToRouterListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return "", err + } + return q.String(), nil +} + // List returns a Pager which allows you to iterate over a collection of // routers. It accepts a ListOpts struct, which allows you to filter and sort // the returned collection for greater efficiency. // // Default policy settings return only those routers that are owned by the // tenant who submits the request, unless an admin user submits the request. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToRouterListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { return RouterPage{pagination.LinkedPageBase{PageResult: r}} }) } diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/results.go index d75615b773..d657160ba2 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers/results.go @@ -21,7 +21,7 @@ type GatewayInfo struct { // router. 
type ExternalFixedIP struct { IPAddress string `json:"ip_address,omitempty"` - SubnetID string `json:"subnet_id"` + SubnetID string `json:"subnet_id,omitempty"` } // Route is a possible route in a router. @@ -82,10 +82,48 @@ type Router struct { RevisionNumber int `json:"revision_number"` // Timestamp when the router was created - CreatedAt time.Time `json:"created_at"` + CreatedAt time.Time `json:"-"` // Timestamp when the router was last updated - UpdatedAt time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"-"` +} + +func (r *Router) UnmarshalJSON(b []byte) error { + type tmp Router + + // Support for older neutron time format + var s1 struct { + tmp + CreatedAt gophercloud.JSONRFC3339NoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339NoZ `json:"updated_at"` + } + + err := json.Unmarshal(b, &s1) + if err == nil { + *r = Router(s1.tmp) + r.CreatedAt = time.Time(s1.CreatedAt) + r.UpdatedAt = time.Time(s1.UpdatedAt) + + return nil + } + + // Support for newer neutron time format + var s2 struct { + tmp + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + } + + err = json.Unmarshal(b, &s2) + if err != nil { + return err + } + + *r = Router(s2.tmp) + r.CreatedAt = time.Time(s2.CreatedAt) + r.UpdatedAt = time.Time(s2.UpdatedAt) + + return nil } // RouterPage is the page returned by a pager when traversing over a @@ -122,11 +160,14 @@ func (r RouterPage) IsEmpty() (bool, error) { // and extracts the elements into a slice of Router structs. In other words, // a generic collection is mapped into a relevant slice. func ExtractRouters(r pagination.Page) ([]Router, error) { - var s struct { - Routers []Router `json:"routers"` - } - err := (r.(RouterPage)).ExtractInto(&s) - return s.Routers, err + var s []Router + err := ExtractRoutersInto(r, &s) + return s, err +} + +// ExtractRoutersInto extracts the elements into a slice of Router structs. +func ExtractRoutersInto(r pagination.Page, v any) error { + return r.(RouterPage).Result.ExtractIntoSlicePtr(v, "routers") } type commonResult struct { diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/requests.go index 77768a3dac..edd253f037 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/requests.go @@ -7,41 +7,60 @@ import ( "github.com/gophercloud/gophercloud/v2/pagination" ) +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToSecGroupListQuery() (string, error) +} + // ListOpts allows the filtering and sorting of paginated collections through // the API. Filtering is achieved by passing in struct field values that map to // the security group rule attributes you want to see returned. SortKey allows // you to sort by a particular network attribute. SortDir sets the direction, // and is either `asc' or `desc'. Marker and Limit are used for pagination. 
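The Router.UnmarshalJSON added above (and repeated below for SecGroupRule and Subnet) makes the structs tolerate both neutron timestamp encodings: the older zone-less format and the newer RFC 3339 one. A small sketch of the effect, with illustrative payloads:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/routers"
)

func main() {
	// Older neutron: timestamps carry no timezone designator.
	legacy := []byte(`{"id": "r1", "created_at": "2019-06-30T04:15:37"}`)
	// Newer neutron: plain RFC 3339.
	current := []byte(`{"id": "r1", "created_at": "2019-06-30T04:15:37Z"}`)

	var a, b routers.Router
	if err := json.Unmarshal(legacy, &a); err != nil {
		panic(err)
	}
	if err := json.Unmarshal(current, &b); err != nil {
		panic(err)
	}
	fmt.Println(a.CreatedAt.Equal(b.CreatedAt)) // true: both parse to the same instant
}
```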
type ListOpts struct { - Direction string `q:"direction"` - EtherType string `q:"ethertype"` - ID string `q:"id"` - Description string `q:"description"` - PortRangeMax int `q:"port_range_max"` - PortRangeMin int `q:"port_range_min"` - Protocol string `q:"protocol"` - RemoteGroupID string `q:"remote_group_id"` - RemoteIPPrefix string `q:"remote_ip_prefix"` - SecGroupID string `q:"security_group_id"` - TenantID string `q:"tenant_id"` - ProjectID string `q:"project_id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` - RevisionNumber *int `q:"revision_number"` + Direction string `q:"direction"` + EtherType string `q:"ethertype"` + ID string `q:"id"` + Description string `q:"description"` + PortRangeMax int `q:"port_range_max"` + PortRangeMin int `q:"port_range_min"` + Protocol string `q:"protocol"` + RemoteAddressGroupID string `q:"remote_address_group_id"` + RemoteGroupID string `q:"remote_group_id"` + RemoteIPPrefix string `q:"remote_ip_prefix"` + SecGroupID string `q:"security_group_id"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` + RevisionNumber *int `q:"revision_number"` +} + +// ToSecGroupListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSecGroupListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return "", err + } + return q.String(), nil } // List returns a Pager which allows you to iterate over a collection of // security group rules. It accepts a ListOpts struct, which allows you to filter // and sort the returned collection for greater efficiency. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToSecGroupListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { return SecGroupRulePage{pagination.LinkedPageBase{PageResult: r}} }) } @@ -106,7 +125,7 @@ type CreateOpts struct { // The maximum port number in the range that is matched by the security group // rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If - // the protocol is ICMP, this value must be an ICMP type. + // the protocol is ICMP, this value must be an ICMP code. PortRangeMax int `json:"port_range_max,omitempty"` // The minimum port number in the range that is matched by the security group @@ -119,12 +138,16 @@ type CreateOpts struct { // "tcp", "udp", "icmp" or an empty string. Protocol RuleProtocol `json:"protocol,omitempty"` + // The remote address group ID to be associated with this security group rule. + // You can specify either RemoteAddressGroupID, RemoteGroupID, or RemoteIPPrefix + RemoteAddressGroupID string `json:"remote_address_group_id,omitempty"` + // The remote group ID to be associated with this security group rule. You can - // specify either RemoteGroupID or RemoteIPPrefix. + // specify either RemoteAddressGroupID, RemoteGroupID, or RemoteIPPrefix.
RemoteGroupID string `json:"remote_group_id,omitempty"` // The remote IP prefix to be associated with this security group rule. You can - // specify either RemoteGroupID or RemoteIPPrefix. This attribute matches the + // specify either RemoteAddressGroupID, RemoteGroupID, or RemoteIPPrefix. This attribute matches the // specified IP prefix as the source IP address of the IP packet. RemoteIPPrefix string `json:"remote_ip_prefix,omitempty"` diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/results.go index 8a3355dfe0..03696ac203 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/security/rules/results.go @@ -1,6 +1,7 @@ package rules import ( + "encoding/json" "time" "github.com/gophercloud/gophercloud/v2" @@ -44,6 +45,10 @@ type SecGroupRule struct { // "tcp", "udp", "icmp" or an empty string. Protocol string + // The remote address group ID to be associated with this security group rule. + // You can specify either RemoteAddressGroupID, RemoteGroupID, or RemoteIPPrefix + RemoteAddressGroupID string `json:"remote_address_group_id"` + // The remote group ID to be associated with this security group rule. You // can specify either RemoteGroupID or RemoteIPPrefix. RemoteGroupID string `json:"remote_group_id"` @@ -63,10 +68,48 @@ type SecGroupRule struct { RevisionNumber int `json:"revision_number"` // Timestamp when the rule was created - CreatedAt time.Time `json:"created_at"` + CreatedAt time.Time `json:"-"` // Timestamp when the rule was last updated - UpdatedAt time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"-"` +} + +func (r *SecGroupRule) UnmarshalJSON(b []byte) error { + type tmp SecGroupRule + + // Support for older neutron time format + var s1 struct { + tmp + CreatedAt gophercloud.JSONRFC3339NoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339NoZ `json:"updated_at"` + } + + err := json.Unmarshal(b, &s1) + if err == nil { + *r = SecGroupRule(s1.tmp) + r.CreatedAt = time.Time(s1.CreatedAt) + r.UpdatedAt = time.Time(s1.UpdatedAt) + + return nil + } + + // Support for newer neutron time format + var s2 struct { + tmp + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + } + + err = json.Unmarshal(b, &s2) + if err != nil { + return err + } + + *r = SecGroupRule(s2.tmp) + r.CreatedAt = time.Time(s2.CreatedAt) + r.UpdatedAt = time.Time(s2.UpdatedAt) + + return nil } // SecGroupRulePage is the page returned by a pager when traversing over a diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks/constants.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks/constants.go new file mode 100644 index 0000000000..6bec77fa79 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks/constants.go @@ -0,0 +1,9 @@ +package trunks + +const ( + StatusActive = "ACTIVE" + StatusBuild = "BUILD" + StatusDegraded = "DEGRADED" + StatusDown = "DOWN" + StatusError = "ERROR" +) diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks/constants.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks/constants.go new file mode 100644 index 0000000000..1214ce9deb
--- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks/constants.go @@ -0,0 +1,8 @@ +package networks + +const ( + StatusActive = "ACTIVE" + StatusBuild = "BUILD" + StatusDown = "DOWN" + StatusError = "ERROR" +) diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/constants.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/constants.go new file mode 100644 index 0000000000..6275839bf4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/constants.go @@ -0,0 +1,8 @@ +package ports + +const ( + StatusActive = "ACTIVE" + StatusBuild = "BUILD" + StatusDown = "DOWN" + StatusError = "ERROR" +) diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go index 74a0fa3b49..db223d48c1 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go @@ -49,7 +49,7 @@ type DeleteResult struct { // IP is a sub-struct that represents an individual IP. type IP struct { - SubnetID string `json:"subnet_id"` + SubnetID string `json:"subnet_id,omitempty"` IPAddress string `json:"ip_address,omitempty"` } diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/requests.go index 150afd7394..85c5d2b402 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/requests.go @@ -43,6 +43,7 @@ type ListOpts struct { NotTags string `q:"not-tags"` NotTagsAny string `q:"not-tags-any"` RevisionNumber *int `q:"revision_number"` + SegmentID string `q:"segment_id"` } // ToSubnetListQuery formats a ListOpts into a query string. @@ -147,6 +148,10 @@ type CreateOpts struct { // Prefixlen is used when user creates a subnet from the subnetpool. It will // overwrite the "default_prefixlen" value of the referenced subnetpool. Prefixlen int `json:"prefixlen,omitempty"` + + // SegmentID is a network segment the subnet is associated with. It is + // available when segment extension is enabled. + SegmentID string `json:"segment_id,omitempty"` } // ToSubnetCreateMap builds a request body from CreateOpts. @@ -194,9 +199,8 @@ type UpdateOpts struct { // AllocationPools are IP Address pools that will be available for DHCP. AllocationPools []AllocationPool `json:"allocation_pools,omitempty"` - // GatewayIP sets gateway information for the subnet. Setting to nil will - // cause a default gateway to automatically be created. Setting to an empty - // string will cause the subnet to be created with no gateway. Setting to + // GatewayIP sets gateway information for the subnet. Setting to an empty + // string will cause the subnet to not have a gateway. Setting to // an explicit address will set that address as the gateway. GatewayIP *string `json:"gateway_ip,omitempty"` @@ -219,6 +223,10 @@ type UpdateOpts struct { // will set revision_number=%s. If the revision number does not match, the // update will fail. RevisionNumber *int `json:"-" h:"If-Match"` + + // SegmentID is a network segment the subnet is associated with. It is + // available when segment extension is enabled. 
+ SegmentID *string `json:"segment_id,omitempty"` } // ToSubnetUpdateMap builds a request body from UpdateOpts. diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/results.go index 01c6acc070..4f0aa8408d 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets/results.go @@ -1,6 +1,7 @@ package subnets import ( + "encoding/json" "time" "github.com/gophercloud/gophercloud/v2" @@ -124,11 +125,53 @@ type Subnet struct { // RevisionNumber optionally set via extensions/standard-attr-revisions RevisionNumber int `json:"revision_number"` + // SegmentID of a network segment the subnet is associated with. It is + // available when segment extension is enabled. + SegmentID string `json:"segment_id"` + // Timestamp when the subnet was created - CreatedAt time.Time `json:"created_at"` + CreatedAt time.Time `json:"-"` // Timestamp when the subnet was last updated - UpdatedAt time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"-"` +} + +func (r *Subnet) UnmarshalJSON(b []byte) error { + type tmp Subnet + + // Support for older neutron time format + var s1 struct { + tmp + CreatedAt gophercloud.JSONRFC3339NoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339NoZ `json:"updated_at"` + } + + err := json.Unmarshal(b, &s1) + if err == nil { + *r = Subnet(s1.tmp) + r.CreatedAt = time.Time(s1.CreatedAt) + r.UpdatedAt = time.Time(s1.UpdatedAt) + + return nil + } + + // Support for newer neutron time format + var s2 struct { + tmp + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + } + + err = json.Unmarshal(b, &s2) + if err != nil { + return err + } + + *r = Subnet(s2.tmp) + r.CreatedAt = time.Time(s2.CreatedAt) + r.UpdatedAt = time.Time(s2.UpdatedAt) + + return nil } // SubnetPage is the page returned by a pager when traversing over a collection diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/base_endpoint.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/base_endpoint.go index 40080f7af2..f219c0bf4d 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/base_endpoint.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/base_endpoint.go @@ -6,9 +6,7 @@ import ( "strings" ) -// BaseEndpoint will return a URL without the /vX.Y -// portion of the URL. -func BaseEndpoint(endpoint string) (string, error) { +func parseEndpoint(endpoint string, includeVersion bool) (string, error) { u, err := url.Parse(endpoint) if err != nil { return "", err @@ -21,8 +19,23 @@ func BaseEndpoint(endpoint string) (string, error) { if version := versionRe.FindString(path); version != "" { versionIndex := strings.Index(path, version) + if includeVersion { + versionIndex += len(version) + } u.Path = path[:versionIndex] } return u.String(), nil } + +// BaseEndpoint will return a URL without the /vX.Y +// portion of the URL. 
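The parseEndpoint helper above now backs two public functions: BaseEndpoint (below), which strips the version segment entirely, and the new BaseVersionedEndpoint, which keeps the version but drops anything after it, such as a project ID. A quick sketch; the endpoint string is illustrative:

```go
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud/v2/openstack/utils"
)

func main() {
	endpoint := "https://compute.example.com/v2.1/0123456789abcdef"

	base, err := utils.BaseEndpoint(endpoint)
	if err != nil {
		panic(err)
	}
	versioned, err := utils.BaseVersionedEndpoint(endpoint)
	if err != nil {
		panic(err)
	}

	fmt.Println(base)      // version segment and project scope removed
	fmt.Println(versioned) // version segment kept, project scope removed
}
```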
+func BaseEndpoint(endpoint string) (string, error) { + return parseEndpoint(endpoint, false) +} + +// BaseVersionedEndpoint will return a URL with the /vX.Y portion of the URL, +// if present, but without a project ID or similar +func BaseVersionedEndpoint(endpoint string) (string, error) { + return parseEndpoint(endpoint, true) +} diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go index 6c720e57ef..ccc56345a6 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go @@ -3,7 +3,6 @@ package utils import ( "context" "fmt" - "strconv" "strings" "github.com/gophercloud/gophercloud/v2" @@ -29,6 +28,7 @@ var goodStatus = map[string]bool{ // It returns the highest-Priority Version, OR exact match with client endpoint, // among the alternatives that are provided, as well as its corresponding endpoint. func ChooseVersion(ctx context.Context, client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { + // TODO(stephenfin): This could be removed since we can accomplish this with GetServiceVersions now. type linkResp struct { Href string `json:"href"` Rel string `json:"rel"` @@ -114,123 +114,3 @@ func ChooseVersion(ctx context.Context, client *gophercloud.ProviderClient, reco return highest, endpoint, nil } - -type SupportedMicroversions struct { - MaxMajor int - MaxMinor int - MinMajor int - MinMinor int -} - -// GetSupportedMicroversions returns the minimum and maximum microversion that is supported by the ServiceClient Endpoint. -func GetSupportedMicroversions(ctx context.Context, client *gophercloud.ServiceClient) (SupportedMicroversions, error) { - type valueResp struct { - ID string `json:"id"` - Status string `json:"status"` - Version string `json:"version"` - MinVersion string `json:"min_version"` - } - - type response struct { - Version valueResp `json:"version"` - Versions []valueResp `json:"versions"` - } - var minVersion, maxVersion string - var supportedMicroversions SupportedMicroversions - var resp response - _, err := client.Get(ctx, client.Endpoint, &resp, &gophercloud.RequestOpts{ - OkCodes: []int{200, 300}, - }) - - if err != nil { - return supportedMicroversions, err - } - - if len(resp.Versions) > 0 { - // We are dealing with an unversioned endpoint - // We only handle the case when there is exactly one, and assume it is the correct one - if len(resp.Versions) > 1 { - return supportedMicroversions, fmt.Errorf("unversioned endpoint with multiple alternatives not supported") - } - minVersion = resp.Versions[0].MinVersion - maxVersion = resp.Versions[0].Version - } else { - minVersion = resp.Version.MinVersion - maxVersion = resp.Version.Version - } - - // Return early if the endpoint does not support microversions - if minVersion == "" && maxVersion == "" { - return supportedMicroversions, fmt.Errorf("microversions not supported by ServiceClient Endpoint") - } - - supportedMicroversions.MinMajor, supportedMicroversions.MinMinor, err = ParseMicroversion(minVersion) - if err != nil { - return supportedMicroversions, err - } - - supportedMicroversions.MaxMajor, supportedMicroversions.MaxMinor, err = ParseMicroversion(maxVersion) - if err != nil { - return supportedMicroversions, err - } - - return supportedMicroversions, nil -} - -// RequireMicroversion checks that the required microversion is supported and -// returns 
a ServiceClient with the microversion set. -func RequireMicroversion(ctx context.Context, client gophercloud.ServiceClient, required string) (gophercloud.ServiceClient, error) { - supportedMicroversions, err := GetSupportedMicroversions(ctx, &client) - if err != nil { - return client, fmt.Errorf("unable to determine supported microversions: %w", err) - } - supported, err := supportedMicroversions.IsSupported(required) - if err != nil { - return client, err - } - if !supported { - return client, fmt.Errorf("microversion %s not supported. Supported versions: %v", required, supportedMicroversions) - } - client.Microversion = required - return client, nil -} - -// IsSupported checks if a microversion falls in the supported interval. -// It returns true if the version is within the interval and false otherwise. -func (supported SupportedMicroversions) IsSupported(version string) (bool, error) { - // Parse the version X.Y into X and Y integers that are easier to compare. - vMajor, vMinor, err := ParseMicroversion(version) - if err != nil { - return false, err - } - - // Check that the major version number is supported. - if (vMajor < supported.MinMajor) || (vMajor > supported.MaxMajor) { - return false, nil - } - - // Check that the minor version number is supported - if (vMinor <= supported.MaxMinor) && (vMinor >= supported.MinMinor) { - return true, nil - } - - return false, nil -} - -// ParseMicroversion parses the version major.minor into separate integers major and minor. -// For example, "2.53" becomes 2 and 53. -func ParseMicroversion(version string) (major int, minor int, err error) { - parts := strings.Split(version, ".") - if len(parts) != 2 { - return 0, 0, fmt.Errorf("invalid microversion format: %q", version) - } - major, err = strconv.Atoi(parts[0]) - if err != nil { - return 0, 0, err - } - minor, err = strconv.Atoi(parts[1]) - if err != nil { - return 0, 0, err - } - return major, minor, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/discovery.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/discovery.go new file mode 100644 index 0000000000..86d1d14c34 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/discovery.go @@ -0,0 +1,372 @@ +package utils + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/gophercloud/gophercloud/v2" +) + +type Status string + +const ( + StatusCurrent Status = "CURRENT" + StatusSupported Status = "SUPPORTED" + StatusDeprecated Status = "DEPRECATED" + StatusExperimental Status = "EXPERIMENTAL" + StatusUnknown Status = "" +) + +// SupportedVersion stores a normalized form of the API version data. It handles APIs that +// support microversions as well as those that do not. +type SupportedVersion struct { + // Major is the major version number of the API + Major int + // Minor is the minor version number of the API + Minor int + // Status is the status of the API + Status Status + SupportedMicroversions +} + +// SupportedMicroversions stores a normalized form of the maximum and minimum API microversions +// supported by a given service. 
+type SupportedMicroversions struct { + // MaxMajor is the major version number of the maximum supported API microversion + MaxMajor int + // MaxMinor is the minor version number of the maximum supported API microversion + MaxMinor int + // MinMajor is the major version number of the minimum supported API microversion + MinMajor int + // MinMinor is the minor version number of the minimum supported API microversion + MinMinor int +} + +type version struct { + ID string `json:"id"` + Status string `json:"status"` + Version string `json:"version,omitempty"` + MaxVersion string `json:"max_version,omitempty"` + MinVersion string `json:"min_version"` +} + +type response struct { + Versions []version `json:"-"` +} + +func (r *response) UnmarshalJSON(in []byte) error { + // intermediateResponse is an intermediate struct that allows us to offload the difference + // between a single version document and a multi-version document to the json parser and + // only focus on differences in the latter + type intermediateResponse struct { + ID string `json:"id"` + Version *version `json:"version"` + Versions *json.RawMessage `json:"versions"` + } + + data := intermediateResponse{} + if err := json.Unmarshal(in, &data); err != nil { + return err + } + + // case 1: we have a single enveloped version object + // + // this is the approach used by Manila for single version responses + if data.Version != nil { + r.Versions = []version{*data.Version} + return nil + } + + // case 2: we have a singly enveloped array of version objects + // + // this is the approach used by nova, cinder and glance, among others, for multi-version + // responses + if data.Versions != nil { + var versionArr []version + if err := json.Unmarshal(*data.Versions, &versionArr); err == nil { + r.Versions = versionArr + return nil + } + } + + // case 3: we have a doubly enveloped array of version objects + // + // this is the approach used by keystone and barbican, among others, for multi-version + // responses + if data.Versions != nil { + type values struct { + Values []version `json:"values"` + } + + var valuesObj values + if err := json.Unmarshal(*data.Versions, &valuesObj); err == nil { + r.Versions = valuesObj.Values + return nil + } + } + + // case 4: we have a single unenveloped version object + // + // this is the approach used by most other services for single version responses + if data.ID != "" { + r.Versions = []version{{ID: data.ID}} + return nil + } + + return fmt.Errorf("failed to unmarshal versions document: %s", in) +} + +func extractVersion(endpointURL string) (int, int, error) { + u, err := url.Parse(endpointURL) + if err != nil { + return 0, 0, err + } + + parts := strings.Split(strings.TrimRight(u.Path, "/"), "/") + if len(parts) == 0 { + return 0, 0, fmt.Errorf("expected path with version, got: %s", u.Path) + } + + // first, check the nth path element for a version string + if majorVersion, minorVersion, err := ParseVersion(parts[len(parts)-1]); err == nil { + return majorVersion, minorVersion, nil + } + + // if there are no more parts, quit + if len(parts) == 1 { + // we don't return the error message directly since it might be misleading: at this point + // we might have a *malformed* version identifier rather than *no* version identifier + return 0, 0, fmt.Errorf("failed to infer version from path: %s", u.Path) + } + + // the guidelines say we should use the currently scoped project_id from the token, but we + // don't necessarily have a token yet so we speculatively look at the (n-1)th path element + // (but
only that) just as keystoneauth does + // + // https://github.com/openstack/keystoneauth/blob/master/keystoneauth1/discover.py#L1534-L1545 + if majorVersion, minorVersion, err := ParseVersion(parts[len(parts)-2]); err == nil { + return majorVersion, minorVersion, err + } + + // once again, we don't return the error message directly + return 0, 0, fmt.Errorf("failed to infer version from path: %s", u.Path) +} + +// GetServiceVersions returns the versions supported by the ServiceClient Endpoint. +// If the endpoint resolves to an unversioned discovery API, this should return one or more supported versions. +// If the endpoint resolves to a versioned discovery API, this should return exactly one supported version. +func GetServiceVersions(ctx context.Context, client *gophercloud.ProviderClient, endpointURL string, discoverVersions bool) ([]SupportedVersion, error) { + var supportedVersions []SupportedVersion + var endpointVersion *SupportedVersion + + if majorVersion, minorVersion, err := extractVersion(endpointURL); err == nil { + endpointVersion = &SupportedVersion{Major: majorVersion, Minor: minorVersion} + if !discoverVersions { + return append(supportedVersions, *endpointVersion), nil + } + } + + var resp response + _, err := client.Request(ctx, "GET", endpointURL, &gophercloud.RequestOpts{ + JSONResponse: &resp, + OkCodes: []int{200, 300}, + }) + if err != nil { + // we weren't able to find a discovery document but we have version information from the URL + if endpointVersion != nil { + return append(supportedVersions, *endpointVersion), nil + } + return supportedVersions, err + } + + versions := resp.Versions + + for _, version := range versions { + majorVersion, minorVersion, err := ParseVersion(version.ID) + if err != nil { + return supportedVersions, err + } + + status, err := ParseStatus(version.Status) + if err != nil { + return supportedVersions, err + } + + supportedVersion := SupportedVersion{ + Major: majorVersion, + Minor: minorVersion, + Status: status, + } + + // Only normalize the microversions if there are microversions to normalize + if (version.Version != "" || version.MaxVersion != "") && version.MinVersion != "" { + supportedVersion.MinMajor, supportedVersion.MinMinor, err = ParseMicroversion(version.MinVersion) + if err != nil { + return supportedVersions, err + } + + maxVersion := version.Version + if maxVersion == "" { + maxVersion = version.MaxVersion + } + supportedVersion.MaxMajor, supportedVersion.MaxMinor, err = ParseMicroversion(maxVersion) + if err != nil { + return supportedVersions, err + } + } + + supportedVersions = append(supportedVersions, supportedVersion) + } + + sort.Slice(supportedVersions, func(i, j int) bool { + return supportedVersions[i].Major > supportedVersions[j].Major || (supportedVersions[i].Major == supportedVersions[j].Major && + supportedVersions[i].Minor > supportedVersions[j].Minor) + }) + + return supportedVersions, nil +} + +// GetSupportedMicroversions returns the minimum and maximum microversion that is supported by the ServiceClient Endpoint. +func GetSupportedMicroversions(ctx context.Context, client *gophercloud.ServiceClient) (SupportedMicroversions, error) { + var supportedMicroversions SupportedMicroversions + + supportedVersions, err := GetServiceVersions(ctx, client.ProviderClient, client.Endpoint, true) + if err != nil { + return supportedMicroversions, err + } + + // If there are multiple versions then we were handed an unversioned endpoint. These don't + // provide microversion information, so we need to fail.
Likewise, if there are no versions + // then something has gone wrong and we also need to fail. + if len(supportedVersions) > 1 { + return supportedMicroversions, fmt.Errorf("unversioned endpoint with multiple alternatives not supported") + } else if len(supportedVersions) == 0 { + return supportedMicroversions, fmt.Errorf("microversions not supported by endpoint") + } + + supportedMicroversions = supportedVersions[0].SupportedMicroversions + + if supportedMicroversions.MaxMajor == 0 && + supportedMicroversions.MaxMinor == 0 && + supportedMicroversions.MinMajor == 0 && + supportedMicroversions.MinMinor == 0 { + return supportedMicroversions, fmt.Errorf("microversions not supported by endpoint") + } + + return supportedMicroversions, err +} + +// RequireMicroversion checks that the required microversion is supported and +// returns a ServiceClient with the microversion set. +func RequireMicroversion(ctx context.Context, client gophercloud.ServiceClient, required string) (gophercloud.ServiceClient, error) { + supportedMicroversions, err := GetSupportedMicroversions(ctx, &client) + if err != nil { + return client, fmt.Errorf("unable to determine supported microversions: %w", err) + } + supported, err := supportedMicroversions.IsSupported(required) + if err != nil { + return client, err + } + if !supported { + return client, fmt.Errorf("microversion %s not supported. Supported versions: %v", required, supportedMicroversions) + } + client.Microversion = required + return client, nil +} + +// IsSupported checks if a microversion falls in the supported interval. +// It returns true if the version is within the interval and false otherwise. +func (supported SupportedMicroversions) IsSupported(version string) (bool, error) { + // Parse the version X.Y into X and Y integers that are easier to compare. + vMajor, vMinor, err := ParseMicroversion(version) + if err != nil { + return false, err + } + + // Check that the major version number is supported. + if (vMajor < supported.MinMajor) || (vMajor > supported.MaxMajor) { + return false, nil + } + + // Check that the minor version number is supported + if (vMinor <= supported.MaxMinor) && (vMinor >= supported.MinMinor) { + return true, nil + } + + return false, nil +} + +// ParseVersion parses the version strings v{MAJOR} and v{MAJOR}.{MINOR} into separate integers +// major and minor. +// For example, "v2.1" becomes 2 and 1, "v3" becomes 3 and 0, and "1" becomes 1 and 0. +func ParseVersion(version string) (major, minor int, err error) { + if version == "" { + return 0, 0, fmt.Errorf("empty version provided") + } + + // We use the regex indicated by the version discovery guidelines. + // + // https://specs.openstack.org/openstack/api-sig/guidelines/consuming-catalog/version-discovery.html#inferring-version + // + // However, we diverge slightly since not all services include the 'v' prefix (glares at zaqar) + versionRe := regexp.MustCompile(`^v?(?P<major>[0-9]+)(\.(?P<minor>[0-9]+))?$`) + + match := versionRe.FindStringSubmatch(version) + if len(match) == 0 { + return 0, 0, fmt.Errorf("invalid format: %q", version) + } + + major, err = strconv.Atoi(match[versionRe.SubexpIndex("major")]) + if err != nil { + return 0, 0, err + } + + minor = 0 + if match[versionRe.SubexpIndex("minor")] != "" { + minor, err = strconv.Atoi(match[versionRe.SubexpIndex("minor")]) + if err != nil { + return 0, 0, err + } + } + + return major, minor, nil +} + +// ParseMicroversion parses the version major.minor into separate integers major and minor. +// For example, "2.53" becomes 2 and 53.
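Taken together, GetServiceVersions, GetSupportedMicroversions, and RequireMicroversion let a caller discover what an endpoint supports and pin a microversion up front. A minimal sketch, assuming standard environment-based authentication (OS_AUTH_URL and friends) and eliding most error handling:

```go
package main

import (
	"context"
	"fmt"

	"github.com/gophercloud/gophercloud/v2"
	"github.com/gophercloud/gophercloud/v2/openstack"
	"github.com/gophercloud/gophercloud/v2/openstack/utils"
)

func main() {
	ctx := context.Background()

	ao, _ := openstack.AuthOptionsFromEnv()
	provider, _ := openstack.AuthenticatedClient(ctx, ao)
	compute, _ := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{})

	// Ask the endpoint what it supports...
	mv, _ := utils.GetSupportedMicroversions(ctx, compute)
	fmt.Printf("supported: %d.%d through %d.%d\n", mv.MinMajor, mv.MinMinor, mv.MaxMajor, mv.MaxMinor)

	// ...and refuse to proceed unless 2.53 is available.
	client, err := utils.RequireMicroversion(ctx, *compute, "2.53")
	if err != nil {
		panic(err)
	}
	fmt.Println("pinned to microversion", client.Microversion)
}
```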
+func ParseMicroversion(version string) (major int, minor int, err error) { + parts := strings.Split(version, ".") + if len(parts) != 2 { + return 0, 0, fmt.Errorf("invalid microversion format: %q", version) + } + major, err = strconv.Atoi(parts[0]) + if err != nil { + return 0, 0, err + } + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return 0, 0, err + } + return major, minor, nil +} + +func ParseStatus(status string) (Status, error) { + switch strings.ToUpper(status) { + case "CURRENT", "STABLE": // keystone uses STABLE instead of CURRENT + return StatusCurrent, nil + case "SUPPORTED": + return StatusSupported, nil + case "DEPRECATED": + return StatusDeprecated, nil + case "": + return StatusUnknown, nil + default: + return StatusUnknown, fmt.Errorf("invalid status: %q", status) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go b/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go index 52fcd38ab3..9048e83def 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go @@ -7,13 +7,14 @@ import ( "errors" "io" "net/http" + "slices" "strings" "sync" ) // DefaultUserAgent is the default User-Agent string set in the request header. const ( - DefaultUserAgent = "gophercloud/v2.7.0" + DefaultUserAgent = "gophercloud/v2.9.0" DefaultMaxBackoffRetries = 60 ) @@ -437,16 +438,8 @@ func (client *ProviderClient) doRequest(ctx context.Context, method, url string, okc = defaultOkCodes(method) } - // Validate the HTTP response status. - var ok bool - for _, code := range okc { - if resp.StatusCode == code { - ok = true - break - } - } - - if !ok { + // Check the response code against the acceptable codes + if !slices.Contains(okc, resp.StatusCode) { body, _ := io.ReadAll(resp.Body) resp.Body.Close() respErr := ErrUnexpectedResponseCode{ diff --git a/vendor/github.com/gophercloud/gophercloud/v2/service_client.go b/vendor/github.com/gophercloud/gophercloud/v2/service_client.go index c1f9f41d4d..015c3f2339 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/service_client.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/service_client.go @@ -130,6 +130,9 @@ func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { opts.MoreHeaders["X-OpenStack-Ironic-API-Version"] = client.Microversion case "baremetal-introspection": opts.MoreHeaders["X-OpenStack-Ironic-Inspector-API-Version"] = client.Microversion + case "container-infrastructure-management", "container-infrastructure", "container-infra": + // magnum should accept container-infrastructure-management but (as of Epoxy) does not + serviceType = "container-infra" } if client.Type != "" { diff --git a/vendor/github.com/gophercloud/gophercloud/v2/util.go b/vendor/github.com/gophercloud/gophercloud/v2/util.go index ad8a7dfaaa..d11a723b1b 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/util.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/util.go @@ -37,9 +37,6 @@ func NormalizePathURL(basePath, rawPath string) (string, error) { absPathSys = filepath.Join(basePath, rawPath) u.Path = filepath.ToSlash(absPathSys) - if err != nil { - return "", err - } u.Scheme = "file" return u.String(), nil } diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE index 1409d6ab92..bb1e9a486a 100644 --- a/vendor/github.com/hashicorp/go-version/LICENSE +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014 
HashiCorp, Inc. +Copyright IBM Corp. 2014, 2025 Mozilla Public License, version 2.0 diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md index 4b7806cd96..83a8249f72 100644 --- a/vendor/github.com/hashicorp/go-version/README.md +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -1,6 +1,7 @@ # Versioning Library for Go + ![Build Status](https://github.com/hashicorp/go-version/actions/workflows/go-tests.yml/badge.svg) -[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-version.svg)](https://pkg.go.dev/github.com/hashicorp/go-version) go-version is a library for parsing versions and version constraints, and verifying versions against a set of constraints. go-version @@ -12,7 +13,7 @@ Versions used with go-version must follow [SemVer](http://semver.org/). ## Installation and Usage Package documentation can be found on -[GoDoc](http://godoc.org/github.com/hashicorp/go-version). +[Go Reference](https://pkg.go.dev/github.com/hashicorp/go-version). Installation can be done with a normal `go get`: diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go index 29bdc4d2b5..3964da070d 100644 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -1,4 +1,4 @@ -// Copyright (c) HashiCorp, Inc. +// Copyright IBM Corp. 2014, 2025 // SPDX-License-Identifier: MPL-2.0 package version @@ -8,8 +8,26 @@ import ( "regexp" "sort" "strings" + "sync" ) +var ( + constraintRegexp *regexp.Regexp + constraintRegexpOnce sync.Once +) + +func getConstraintRegexp() *regexp.Regexp { + constraintRegexpOnce.Do(func() { + // This heavy lifting only happens the first time this function is called + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + `<=|>=|!=|~>|<|>|=|`, + VersionRegexpRaw, + )) + }) + return constraintRegexp +} + // Constraint represents a single constraint for a version, such as // ">= 1.0". type Constraint struct { @@ -29,38 +47,11 @@ type Constraints []*Constraint type constraintFunc func(v, c *Version) bool -var constraintOperators map[string]constraintOperation - type constraintOperation struct { op operator f constraintFunc } -var constraintRegexp *regexp.Regexp - -func init() { - constraintOperators = map[string]constraintOperation{ - "": {op: equal, f: constraintEqual}, - "=": {op: equal, f: constraintEqual}, - "!=": {op: notEqual, f: constraintNotEqual}, - ">": {op: greaterThan, f: constraintGreaterThan}, - "<": {op: lessThan, f: constraintLessThan}, - ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, - "<=": {op: lessThanEqual, f: constraintLessThanEqual}, - "~>": {op: pessimistic, f: constraintPessimistic}, - } - - ops := make([]string, 0, len(constraintOperators)) - for k := range constraintOperators { - ops = append(ops, regexp.QuoteMeta(k)) - } - - constraintRegexp = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - strings.Join(ops, "|"), - VersionRegexpRaw)) -} - // NewConstraint will parse one or more constraints from the given // constraint string. The string must be a comma-separated list of // constraints. @@ -107,7 +98,7 @@ func (cs Constraints) Check(v *Version) bool { // to '>0.2' it is *NOT* treated as equal. 
// // Missing operator is treated as equal to '=', whitespaces -// are ignored and constraints are sorted before comaparison. +// are ignored and constraints are sorted before comparison. func (cs Constraints) Equals(c Constraints) bool { if len(cs) != len(c) { return false @@ -176,9 +167,9 @@ func (c *Constraint) String() string { } func parseSingle(v string) (*Constraint, error) { - matches := constraintRegexp.FindStringSubmatch(v) + matches := getConstraintRegexp().FindStringSubmatch(v) if matches == nil { - return nil, fmt.Errorf("Malformed constraint: %s", v) + return nil, fmt.Errorf("malformed constraint: %s", v) } check, err := NewVersion(matches[2]) @@ -186,7 +177,25 @@ func parseSingle(v string) (*Constraint, error) { return nil, err } - cop := constraintOperators[matches[1]] + var cop constraintOperation + switch matches[1] { + case "=": + cop = constraintOperation{op: equal, f: constraintEqual} + case "!=": + cop = constraintOperation{op: notEqual, f: constraintNotEqual} + case ">": + cop = constraintOperation{op: greaterThan, f: constraintGreaterThan} + case "<": + cop = constraintOperation{op: lessThan, f: constraintLessThan} + case ">=": + cop = constraintOperation{op: greaterThanEqual, f: constraintGreaterThanEqual} + case "<=": + cop = constraintOperation{op: lessThanEqual, f: constraintLessThanEqual} + case "~>": + cop = constraintOperation{op: pessimistic, f: constraintPessimistic} + default: + cop = constraintOperation{op: equal, f: constraintEqual} + } return &Constraint{ f: cop.f, diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index 7c683c2813..17b29732ee 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -1,23 +1,39 @@ -// Copyright (c) HashiCorp, Inc. +// Copyright IBM Corp. 2014, 2025 // SPDX-License-Identifier: MPL-2.0 package version import ( - "bytes" "database/sql/driver" "fmt" "regexp" "strconv" "strings" + "sync" ) // The compiled regular expression used to test the validity of a version. var ( - versionRegexp *regexp.Regexp - semverRegexp *regexp.Regexp + versionRegexp *regexp.Regexp + versionRegexpOnce sync.Once + semverRegexp *regexp.Regexp + semverRegexpOnce sync.Once ) +func getVersionRegexp() *regexp.Regexp { + versionRegexpOnce.Do(func() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + }) + return versionRegexp +} + +func getSemverRegexp() *regexp.Regexp { + semverRegexpOnce.Do(func() { + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") + }) + return semverRegexp +} + // The raw regular expression string used for testing the validity // of a version. const ( @@ -42,28 +58,23 @@ type Version struct { original string } -func init() { - versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") - semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") -} - // NewVersion parses the given version and returns a new // Version. 
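The go-version changes above replace package-level init work with lazily compiled regexes behind sync.Once and swap the operator lookup map for a switch; observable behavior is unchanged. A small usage sketch:

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// The first parse pays the one-time regexp compilation cost; later
	// calls reuse the compiled pattern.
	v, err := version.NewVersion("1.2.3-beta+build.7")
	if err != nil {
		panic(err)
	}
	constraints, err := version.NewConstraint(">= 1.2, < 2.0")
	if err != nil {
		panic(err)
	}

	fmt.Println(constraints.Check(v)) // true
	fmt.Println(v.String())           // 1.2.3-beta+build.7
}
```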
func NewVersion(v string) (*Version, error) { - return newVersion(v, versionRegexp) + return newVersion(v, getVersionRegexp()) } // NewSemver parses the given version and returns a new // Version that adheres strictly to SemVer specs // https://semver.org/ func NewSemver(v string) (*Version, error) { - return newVersion(v, semverRegexp) + return newVersion(v, getSemverRegexp()) } func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { matches := pattern.FindStringSubmatch(v) if matches == nil { - return nil, fmt.Errorf("Malformed version: %s", v) + return nil, fmt.Errorf("malformed version: %s", v) } segmentsStr := strings.Split(matches[1], ".") segments := make([]int64, len(segmentsStr)) @@ -71,7 +82,7 @@ func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { val, err := strconv.ParseInt(str, 10, 64) if err != nil { return nil, fmt.Errorf( - "Error parsing version: %s", err) + "error parsing version: %s", err) } segments[i] = val @@ -174,7 +185,7 @@ func (v *Version) Compare(other *Version) int { } else if lhs < rhs { return -1 } - // Otherwis, rhs was > lhs, they're not equal + // Otherwise, rhs was > lhs, they're not equal return 1 } @@ -382,22 +393,29 @@ func (v *Version) Segments64() []int64 { // missing parts (1.0 => 1.0.0) will be made into a canonicalized form // as shown in the parenthesized examples. func (v *Version) String() string { - var buf bytes.Buffer - fmtParts := make([]string, len(v.segments)) + return string(v.bytes()) +} + +func (v *Version) bytes() []byte { + var buf []byte for i, s := range v.segments { - // We can ignore err here since we've pre-parsed the values in segments - str := strconv.FormatInt(s, 10) - fmtParts[i] = str + if i > 0 { + buf = append(buf, '.') + } + buf = strconv.AppendInt(buf, s, 10) } - fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { - fmt.Fprintf(&buf, "-%s", v.pre) + buf = append(buf, '-') + buf = append(buf, v.pre...) } + if v.metadata != "" { - fmt.Fprintf(&buf, "+%s", v.metadata) + buf = append(buf, '+') + buf = append(buf, v.metadata...) } - return buf.String() + return buf } // Original returns the original parsed version as-is, including any diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go index 83547fe13d..11bc8b1c56 100644 --- a/vendor/github.com/hashicorp/go-version/version_collection.go +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -1,4 +1,4 @@ -// Copyright (c) HashiCorp, Inc. +// Copyright IBM Corp. 
2014, 2025 // SPDX-License-Identifier: MPL-2.0 package version diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 69b15d1848..9d1bb914b6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,112 @@ +## 2.27.3 + +### Fixes +- report exit result in case of failure [1c9f356] +- fix data race [ece19c8] + +## 2.27.2 + +### Fixes +- inline automaxprocs to simplify dependencies; this will be removed when Go 1.26 comes out [a69113a] + +### Maintenance +- Fix syntax errors and typo [a99c6e0] +- Fix paragraph position error [f993df5] + +## 2.27.1 + +### Fixes +- Fix Ginkgo Reporter slice-bounds panic [606c1cb] +- Bug Fix: Add GinkgoTBWrapper.Attr() and GinkgoTBWrapper.Output() [a6463b3] + +## 2.27.0 + +### Features + +#### Transforming Nodes during Tree Construction + +This release adds support for `NodeArgsTransformer`s that can be registered with `AddTreeConstructionNodeArgsTransformer`. + +These are called during the tree construction phase as nodes are constructed and can modify the node strings and decorators. This enables frameworks built on top of Ginkgo to modify Ginkgo nodes and enforce conventions. + +Learn more [here](https://onsi.github.io/ginkgo/#advanced-transforming-node-arguments-during-tree-construction). + +#### Spec Prioritization + +A new `SpecPriority(int)` decorator has been added. Ginkgo will honor priority when ordering specs, ensuring that higher priority specs start running before lower priority specs. + +Learn more [here](https://onsi.github.io/ginkgo/#prioritizing-specs). + +### Maintenance +- Bump rexml from 3.4.0 to 3.4.2 in /docs (#1595) [1333dae] +- Bump github.com/gkampitakis/go-snaps from 0.5.14 to 0.5.15 (#1600) [17ae63e] + +## 2.26.0 + +### Features + +Ginkgo can now generate json-formatted reports that are compatible with the `go test` json format. Use `ginkgo --gojson-report=report.go.json`. This is not intended to be a replacement for Ginkgo's native json format, which is more information-rich and better models Ginkgo's test structure semantics. + +## 2.25.3 + +### Fixes + +- emit --github-output group only for progress report itself [f01aed1] + +## 2.25.2 + +### Fixes +- Add github output group for progress report content + +### Maintenance +- Bump Gomega + +## 2.25.1 + +### Fixes +- fix(types): ignore nameless nodes on FullText() [10866d3] + +- chore: fix some CodeQL warnings [2e42cff] + +## 2.25.0 + +### `AroundNode` + +This release introduces a new decorator to support more complex spec setup use cases. + +`AroundNode` registers a function that runs before each individual node. This is considered a more advanced decorator. + +Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information and some examples. + +Allowed signatures: + +- `AroundNode(func())` - `func` will be called before the node is run. +- `AroundNode(func(ctx context.Context) context.Context)` - `func` can wrap the passed-in context and return a new one which will be passed on to the node. +- `AroundNode(func(ctx context.Context, body func(ctx context.Context)))` - `ctx` is the context for the node and `body` is a function that must be called to run the node. This gives you complete control over what runs before and after the node.

Multiple `AroundNode` decorators can be applied to a single node and they will run in the order they are applied.
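The three allowed signatures are easier to grasp in use. A hedged sketch of a spec combining the first two forms, based only on the signatures documented in this changelog entry (the spec and key names are hypothetical):

```go
package suite_test

import (
	"context"
	"runtime"

	. "github.com/onsi/ginkgo/v2"
)

type tenantKey struct{}

var _ = It("talks to a thread-bound library",
	// Form 1: plain func, runs in the node's goroutine before the body.
	AroundNode(func() { runtime.LockOSThread() }),
	// Form 2: wrap the context; the returned context is handed to the node.
	AroundNode(func(ctx context.Context) context.Context {
		return context.WithValue(ctx, tenantKey{}, "demo")
	}),
	func(ctx SpecContext) {
		_ = ctx.Value(tenantKey{}) // "demo"
	},
)
```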
+ +Unlike setup nodes like `BeforeEach` and `DeferCleanup`, `AroundNode` is guaranteed to run in the same goroutine as the decorated node. This is necessary when working with lower-level libraries that must run on a single thread (you can call `runtime.LockOSThread()` in the `AroundNode` to ensure that the node runs on a single thread). + +Since `AroundNode` allows you to modify the context, you can also use `AroundNode` to implement shared setup that attaches values to the context. + +If applied to a container, `AroundNode` will run before every node in the container, including setup nodes like `BeforeEach` and `DeferCleanup`. + +`AroundNode` can also be applied to `RunSpecs` to run before every node in the suite. This opens up new mechanisms for instrumenting individual nodes across an entire suite. + +## 2.24.0 + +### Features + +Specs can now be decorated with (e.g.) `SemVerConstraint("2.1.0")` and `ginkgo --sem-ver-filter="2.1.1"` will only run constrained specs that match the requested version. Learn more in the docs [here](https://onsi.github.io/ginkgo/#spec-semantic-version-filtering)! Thanks to @Icarus9913 for the PR. + +### Fixes + +- remove -o from run command [3f5d379]. fixes [#1582](https://github.com/onsi/ginkgo/issues/1582) + +### Maintenance + +Numerous dependency bumps and documentation fixes + ## 2.23.4 Prior to this release Ginkgo would compute the incorrect number of available CPUs when running with `-p` in a Linux container. Thanks to @emirot for the fix! diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md index e3d0c13cc6..7b7ab9e39c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/README.md +++ b/vendor/github.com/onsi/ginkgo/v2/README.md @@ -113,3 +113,13 @@ Ginkgo is MIT-Licensed ## Contributing See [CONTRIBUTING.md](CONTRIBUTING.md) + +## Sponsors + +Sponsors commit to a [sponsorship](https://github.com/sponsors/onsi) for a year. If you're an organization that makes use of Ginkgo, please consider becoming a sponsor! + +

+Browser testing via [sponsor logo links stripped from this extract]
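Before the core_dsl.go changes that follow, a usage sketch for the `SemVerConstraint` decorator and `--sem-ver-filter` flag introduced in 2.24.0 above. This is a hedged example (the suite content is hypothetical); constraints union across the node hierarchy per the decorator docs further down:

```go
package suite_test

import (
	. "github.com/onsi/ginkgo/v2"
)

// Run with: ginkgo --sem-ver-filter="2.1.1"
// Only specs whose union of constraints admits 2.1.1 are kept.
var _ = Describe("feature gates", SemVerConstraint(">= 2.0.0"), func() {
	It("runs on the 2.x line only", SemVerConstraint("< 3.0.0"), func() {
		// Effective constraints here: ">= 2.0.0" and "< 3.0.0".
	})
})
```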

diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index d027bdff93..7e165e4738 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -186,6 +186,20 @@ func GinkgoLabelFilter() string { return suiteConfig.LabelFilter } +/* +GinkgoSemVerFilter() returns the semantic version filter configured for this suite via `--sem-ver-filter`. + +You can use this to manually check if a set of semantic version constraints would satisfy the filter via: + + if (SemVerConstraint("> 2.6.0", "< 2.8.0").MatchesSemVerFilter(GinkgoSemVerFilter())) { + //... + } +*/ +func GinkgoSemVerFilter() string { + suiteConfig, _ := GinkgoConfiguration() + return suiteConfig.SemVerFilter +} + /* PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant when running in parallel and output to stdout/stderr is being intercepted. You generally @@ -254,7 +268,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { } defer global.PopClone() - suiteLabels := extractSuiteConfiguration(args) + suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args) var reporter reporters.Reporter if suiteConfig.ParallelTotal == 1 { @@ -297,7 +311,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { suitePath, err = filepath.Abs(suitePath) exitIfErr(err) - passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) + passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) outputInterceptor.Shutdown() flagSet.ValidateDeprecations(deprecationTracker) @@ -316,8 +330,10 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { return passed } -func extractSuiteConfiguration(args []any) Labels { +func extractSuiteConfiguration(args []any) (Labels, SemVerConstraints, types.AroundNodes) { suiteLabels := Labels{} + suiteSemVerConstraints := SemVerConstraints{} + aroundNodes := types.AroundNodes{} configErrors := []error{} for _, arg := range args { switch arg := arg.(type) { @@ -327,6 +343,10 @@ func extractSuiteConfiguration(args []any) Labels { reporterConfig = arg case Labels: suiteLabels = append(suiteLabels, arg...) + case SemVerConstraints: + suiteSemVerConstraints = append(suiteSemVerConstraints, arg...) 
+ case types.AroundNodeDecorator: + aroundNodes = append(aroundNodes, arg) default: configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg)) } @@ -342,7 +362,7 @@ func extractSuiteConfiguration(args []any) Labels { os.Exit(1) } - return suiteLabels + return suiteLabels, suiteSemVerConstraints, aroundNodes } func getwd() (string, error) { @@ -365,7 +385,7 @@ func PreviewSpecs(description string, args ...any) Report { } defer global.PopClone() - suiteLabels := extractSuiteConfiguration(args) + suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args) priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1 defer func() { @@ -383,7 +403,7 @@ func PreviewSpecs(description string, args ...any) Report { suitePath, err = filepath.Abs(suitePath) exitIfErr(err) - global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) + global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) return global.Suite.GetPreviewReport() } @@ -481,6 +501,38 @@ func pushNode(node internal.Node, errors []error) bool { return true } +// NodeArgsTransformer is a hook which is called by the test construction DSL methods +// before creating the new node. If it returns any error, the test suite +// prints those errors and exits. The text and arguments can be modified, +// which includes directly changing the args slice that is passed in. +// Arguments have been flattened already, i.e. none of the entries in args is another []any. +// The result may be nested. +// +// The node type is provided for information and remains the same. +// +// The offset is valid for calling NewLocation directly in the +// implementation of TransformNodeArgs to find the location where +// the Ginkgo DSL function is called. An additional offset supplied +// by the caller via args is already included. +// +// A NodeArgsTransformer can be registered with AddTreeConstructionNodeArgsTransformer. +type NodeArgsTransformer func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error) + +// AddTreeConstructionNodeArgsTransformer registers a NodeArgsTransformer. +// Only nodes which get created after registering a NodeArgsTransformer +// are transformed by it. The returned function can be called to +// unregister the transformer. +// +// Both may only be called during the construction phase. +// +// If there is more than one registered transformer, then the most +// recently added ones get called first. +func AddTreeConstructionNodeArgsTransformer(transformer NodeArgsTransformer) func() { + // This conversion could be avoided with a type alias, but type aliases make + // developer documentation less useful. + return internal.AddTreeConstructionNodeArgsTransformer(internal.NodeArgsTransformer(transformer)) +} + /* Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can contain any number of Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It). 
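The `NodeArgsTransformer` hook defined above can, for instance, enforce naming conventions while the spec tree is being built. A speculative sketch based only on the signature and registration function shown in this hunk:

```go
package suite_test

import (
	"fmt"
	"strings"

	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/types"
)

// Reject container texts that start with a lowercase letter.
// Registered during the construction phase; call unregister() to remove it.
var unregister = AddTreeConstructionNodeArgsTransformer(
	func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error) {
		if nodeType.Is(types.NodeTypeContainer) && text != "" && strings.ToUpper(text[:1]) != text[:1] {
			return text, args, []error{fmt.Errorf("container text %q must start uppercase", text)}
		}
		return text, args, nil
	})
```

Returning a non-nil error slice causes the suite to print the errors and exit, which is what makes the hook suitable for enforcing conventions.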
@@ -492,7 +544,7 @@ You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-conta In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference */ func Describe(text string, args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...))) } /* @@ -500,7 +552,7 @@ FDescribe focuses specs within the Describe block. */ func FDescribe(text string, args ...any) bool { args = append(args, internal.Focus) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...))) } /* @@ -508,7 +560,7 @@ PDescribe marks specs within the Describe block as pending. */ func PDescribe(text string, args ...any) bool { args = append(args, internal.Pending) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...))) } /* @@ -521,21 +573,21 @@ var XDescribe = PDescribe /* Context is an alias for Describe - it generates the exact same kind of Container node */ var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe -/* When is an alias for Describe - it generates the exact same kind of Container node */ +/* When is an alias for Describe - it generates the exact same kind of Container node with "when " as prefix for the text. */ func When(text string, args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...))) } -/* When is an alias for Describe - it generates the exact same kind of Container node */ +/* When is an alias for Describe - it generates the exact same kind of Container node with "when " as prefix for the text. */ func FWhen(text string, args ...any) bool { args = append(args, internal.Focus) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...))) } /* When is an alias for Describe - it generates the exact same kind of Container node */ func PWhen(text string, args ...any) bool { args = append(args, internal.Pending) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...))) } var XWhen = PWhen @@ -551,7 +603,7 @@ You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it In addition, subject nodes can be decorated with a variety of decorators. 
You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference */ func It(text string, args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...))) } /* @@ -559,7 +611,7 @@ FIt allows you to focus an individual It. */ func FIt(text string, args ...any) bool { args = append(args, internal.Focus) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...))) } /* @@ -567,7 +619,7 @@ PIt allows you to mark an individual It as pending. */ func PIt(text string, args ...any) bool { args = append(args, internal.Pending) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...))) } /* @@ -614,7 +666,7 @@ You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup- func BeforeSuite(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...))) } /* @@ -633,7 +685,7 @@ You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup- func AfterSuite(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...))) } /* @@ -671,7 +723,7 @@ func SynchronizedBeforeSuite(process1Body any, allProcessBody any, args ...any) combinedArgs := []any{process1Body, allProcessBody} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...))) } /* @@ -691,7 +743,7 @@ func SynchronizedAfterSuite(allProcessBody any, process1Body any, args ...any) b combinedArgs := []any{allProcessBody, process1Body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...))) } /* @@ -704,7 +756,7 @@ You cannot nest any other Ginkgo nodes within a BeforeEach node's closure. 
You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach */ func BeforeEach(args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeEach, "", args...))) } /* @@ -717,7 +769,7 @@ You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach */ func JustBeforeEach(args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeJustBeforeEach, "", args...))) } /* @@ -732,7 +784,7 @@ You cannot nest any other Ginkgo nodes within an AfterEach node's closure. You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup */ func AfterEach(args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterEach, "", args...))) } /* @@ -744,7 +796,7 @@ You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach */ func JustAfterEach(args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeJustAfterEach, "", args...))) } /* @@ -759,7 +811,7 @@ You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#o And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall */ func BeforeAll(args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeAll, "", args...))) } /* @@ -776,7 +828,7 @@ You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#o And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall */ func AfterAll(args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterAll, "", args...))) } /* diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go index c65af4ce1c..e331d7cf8c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go @@ -2,6 +2,7 @@ package ginkgo import ( "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/types" ) /* @@ -99,6 +100,23 @@ You can learn more here: https://onsi.github.io/ginkgo/#spec-labels */ type Labels = internal.Labels +/* +SemVerConstraint decorates specs with SemVerConstraints. 
Multiple semantic version constraints can be passed to SemVerConstraint, and these strings must follow the semantic version constraint rules. +SemVerConstraints can be applied to container and subject nodes, but not setup nodes. You can provide multiple SemVerConstraints to a given node and a spec's semantic version constraints are the union of all semantic version constraints in its node hierarchy. + +You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func SemVerConstraint(semVerConstraints ...string) SemVerConstraints { + return SemVerConstraints(semVerConstraints) +} + +/* +SemVerConstraints are the type for spec SemVerConstraint decorators. Use SemVerConstraint(...) to construct SemVerConstraints. +You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering +*/ +type SemVerConstraints = internal.SemVerConstraints + /* PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node. @@ -136,8 +154,40 @@ Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will pro */ type GracePeriod = internal.GracePeriod +/* +SpecPriority allows you to assign a priority to a spec or container. + +Specs with higher priority will be scheduled to run before specs with lower priority. The default priority is 0 and negative priorities are allowed. +*/ +type SpecPriority = internal.SpecPriority + /* SuppressProgressReporting is a decorator that allows you to disable progress reporting of a particular node. This is useful if `ginkgo -v -progress` is generating too much noise; particularly if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports. */ const SuppressProgressReporting = internal.SuppressProgressReporting + +/* +AroundNode registers a function that runs before each individual node. This is considered a more advanced decorator. + +Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information. + +Allowed signatures: + +- AroundNode(func()) - func will be called before the node is run. +- AroundNode(func(ctx context.Context) context.Context) - func can wrap the passed-in context and return a new one which will be passed on to the node. +- AroundNode(func(ctx context.Context, body func(ctx context.Context))) - ctx is the context for the node and body is a function that must be called to run the node. This gives you complete control over what runs before and after the node. + +Multiple AroundNode decorators can be applied to a single node and they will run in the order they are applied. + +Unlike setup nodes like BeforeEach and DeferCleanup, AroundNode is guaranteed to run in the same goroutine as the decorated node. This is necessary when working with lower-level libraries that must run on a single thread (you can call runtime.LockOSThread() in the AroundNode to ensure that the node runs on a single thread). + +Since AroundNode allows you to modify the context, you can also use AroundNode to implement shared setup that attaches values to the context. You must return a context that inherits from the passed-in context. + +If applied to a container, AroundNode will run before every node in the container, including setup nodes like BeforeEach and DeferCleanup. + +AroundNode can also be applied to RunSpecs to run before every node in the suite.
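Rounding out the decorators above, the SpecPriority alias documented in this hunk can be exercised as follows. A hedged sketch (suite content is hypothetical); the scheduling behavior is as described in the doc comment:

```go
package suite_test

import (
	. "github.com/onsi/ginkgo/v2"
)

var _ = Describe("scheduling", func() {
	// Higher priority starts earlier; the default is 0, negatives are allowed.
	It("kicks off the slow migration test first", SpecPriority(10), func() {})
	It("runs a quick sanity check whenever", func() {}) // priority 0
})
```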
+*/ +func AroundNode[F types.AroundNodeAllowedFuncs](f F) types.AroundNodeDecorator { + return types.AroundNode(f, types.NewCodeLocation(1)) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go new file mode 100644 index 0000000000..ee6ac7b5f3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go @@ -0,0 +1,8 @@ +//go:build !go1.25 +// +build !go1.25 + +package main + +import ( + _ "github.com/onsi/ginkgo/v2/ginkgo/automaxprocs" +) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md new file mode 100644 index 0000000000..e249ebe8b3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md @@ -0,0 +1,3 @@ +This entire directory is a lightly modified clone of https://github.com/uber-go/automaxprocs + +It will be removed when Go 1.26 ships and we no longer need to support Go 1.24 (which does not correctly autodetect maxprocs in containers). diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go new file mode 100644 index 0000000000..8a762b51d6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go @@ -0,0 +1,71 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package automaxprocs lets Go programs easily configure runtime.GOMAXPROCS to +// match the configured Linux CPU quota. It is a lightly modified, inlined clone +// of go.uber.org/automaxprocs. +package automaxprocs + +import ( + "os" + "runtime" +) + +func init() { + Set() +} + +const _maxProcsKey = "GOMAXPROCS" + +type config struct { + procs func(int, func(v float64) int) (int, CPUQuotaStatus, error) + minGOMAXPROCS int + roundQuotaFunc func(v float64) int +} + +// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning +// any error encountered. +// +// Set is a no-op on non-Linux systems and in Linux environments without a +// configured CPU quota. +func Set() error { + cfg := &config{ + procs: CPUQuotaToGOMAXPROCS, + roundQuotaFunc: DefaultRoundFunc, + minGOMAXPROCS: 1, + } + + // Honor the GOMAXPROCS environment variable if present.
Otherwise, amend + // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is + // Linux, and guarantee a minimum value of 1. + if _, exists := os.LookupEnv(_maxProcsKey); exists { + return nil + } + maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc) + if err != nil { + return err + } + if status == CPUQuotaUndefined { + return nil + } + runtime.GOMAXPROCS(maxProcs) + return nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go similarity index 99% rename from vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go index fe4ecf561e..a4676933e8 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import ( "bufio" diff --git a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go similarity index 99% rename from openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go index e89f543602..ed384891ef 100644 --- a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs const ( // _cgroupFSType is the Linux CGroup file system type used in diff --git a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go similarity index 99% rename from openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go index 78556062fe..69a0be6b71 100644 --- a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import ( "bufio" diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go similarity index 91% rename from hack/tools/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go index f9057fd273..2d83343bd9 100644 --- a/hack/tools/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go @@ -21,12 +21,10 @@ //go:build linux // +build linux -package runtime +package automaxprocs import ( "errors" - - cg "go.uber.org/automaxprocs/internal/cgroups" ) // CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process @@ -58,8 +56,8 @@ type queryer interface { } var ( - _newCgroups2 = cg.NewCGroups2ForCurrentProcess - _newCgroups = cg.NewCGroupsForCurrentProcess + _newCgroups2 = NewCGroups2ForCurrentProcess + _newCgroups = NewCGroupsForCurrentProcess _newQueryer = newQueryer ) @@ -68,7 +66,7 @@ func newQueryer() (queryer, error) { if err == nil { return cgroups, nil } - if errors.Is(err,
cg.ErrNotV2) { + if errors.Is(err, ErrNotV2) { return _newCgroups() } return nil, err diff --git a/openshift/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go similarity index 98% rename from openshift/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go index e74701508e..d2d61e8941 100644 --- a/openshift/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go @@ -21,7 +21,7 @@ //go:build !linux // +build !linux -package runtime +package automaxprocs // CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process // to a valid GOMAXPROCS value. This is Linux-specific and not supported in the diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go similarity index 98% rename from hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go index 94ac75a46e..2e235d7d65 100644 --- a/hack/tools/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import "fmt" diff --git a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go similarity index 99% rename from openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go index f3877f78aa..7c3fa306ef 100644 --- a/openshift/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import ( "bufio" diff --git a/hack/tools/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go similarity index 98% rename from hack/tools/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go index f8a2834ac0..b8ec7e502a 100644 --- a/hack/tools/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
-package runtime +package automaxprocs import "math" diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go similarity index 99% rename from vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go index cddc3eaec3..881ebd5902 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import ( "bufio" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index 2b36b2feb9..3021dfec2e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -29,7 +29,6 @@ func BuildBuildCommand() command.Command { var errors []error cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) command.AbortIfErrors("Ginkgo detected configuration issues:", errors) - buildSpecs(args, cliConfig, goFlagsConfig) }, } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index 8e16d2bb03..f3439a3f0c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -90,6 +90,9 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC if reporterConfig.JSONReport != "" { reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports}) } + if reporterConfig.GoJSONReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.GoJSONReport, GenerateFunc: reporters.GenerateGoTestJSONReport, MergeFunc: reporters.MergeAndCleanupGoTestJSONReports}) + } if reporterConfig.JUnitReport != "" { reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports}) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go index 41052ea19d..48c69a1d83 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -9,6 +9,7 @@ import ( "path/filepath" "regexp" "strings" + "sync/atomic" "syscall" "time" @@ -107,6 +108,9 @@ func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig t if reporterConfig.JSONReport != "" { reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) } + if reporterConfig.GoJSONReport != "" { + reporterConfig.GoJSONReport = AbsPathForGeneratedAsset(reporterConfig.GoJSONReport, suite, cliConfig, 0) + } if reporterConfig.JUnitReport != "" { reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) } @@ -156,12 +160,15 @@ func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig t func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, 
goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { type procResult struct { + proc int + exitResult string passed bool hasProgrammaticFocus bool } numProcs := cliConfig.ComputedProcs() procOutput := make([]*bytes.Buffer, numProcs) + procExitResult := make([]string, numProcs) coverProfiles := []string{} blockProfiles := []string{} @@ -179,6 +186,9 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig if reporterConfig.JSONReport != "" { reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) } + if reporterConfig.GoJSONReport != "" { + reporterConfig.GoJSONReport = AbsPathForGeneratedAsset(reporterConfig.GoJSONReport, suite, cliConfig, 0) + } if reporterConfig.JUnitReport != "" { reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) } @@ -218,16 +228,20 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig args = append(args, additionalArgs...) cmd, buf := buildAndStartCommand(suite, args, false) + var exited atomic.Bool procOutput[proc-1] = buf - server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() }) + server.RegisterAlive(proc, func() bool { return !exited.Load() }) go func() { cmd.Wait() exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() procResults <- procResult{ + proc: proc, + exitResult: cmd.ProcessState.String(), passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE), hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE, } + exited.Store(true) }() } @@ -236,6 +250,7 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig result := <-procResults passed = passed && result.passed suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus + procExitResult[result.proc-1] = result.exitResult } if passed { suite.State = TestSuiteStatePassed @@ -255,6 +270,8 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc)) fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String())) + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Exit result of proc %d:{{/}}\n", proc)) + fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s\n", procExitResult[proc-1])) } fmt.Fprintf(os.Stderr, "** End **") } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go index bd6b8fbff3..419589b48c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -3,7 +3,6 @@ package main import ( "fmt" "os" - _ "go.uber.org/automaxprocs" "github.com/onsi/ginkgo/v2/ginkgo/build" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/generators" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go index a34d94354d..75cbdb4962 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -2,12 +2,9 @@ package watch import ( "go/build" - "regexp" + "strings" ) -var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) -var ginkgoIntegrationTestFilter = 
regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing - type Dependencies struct { deps map[string]int } @@ -78,7 +75,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) { if err != nil { continue } - if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) { + if !pkg.Goroot && (!matchesGinkgoOrGomega(pkg.Dir) || matchesGinkgoIntegration(pkg.Dir)) { d.addDepIfNotPresent(pkg.Dir, depth) } } @@ -90,3 +87,11 @@ func (d Dependencies) addDepIfNotPresent(dep string, depth int) { d.deps[dep] = depth } } + +func matchesGinkgoOrGomega(s string) bool { + return strings.Contains(s, "github.com/onsi/ginkgo") || strings.Contains(s, "github.com/onsi/gomega") +} + +func matchesGinkgoIntegration(s string) bool { + return strings.Contains(s, "github.com/onsi/ginkgo/integration") // allow us to integration test this thing +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 993279de29..40d1e1ab5c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -2,6 +2,7 @@ package ginkgo import ( "context" + "io" "testing" "github.com/onsi/ginkgo/v2/internal/testingtproxy" @@ -69,6 +70,8 @@ type GinkgoTInterface interface { Skipf(format string, args ...any) Skipped() bool TempDir() string + Attr(key, value string) + Output() io.Writer } /* @@ -187,3 +190,9 @@ func (g *GinkgoTBWrapper) Skipped() bool { func (g *GinkgoTBWrapper) TempDir() string { return g.GinkgoT.TempDir() } +func (g *GinkgoTBWrapper) Attr(key, value string) { + g.GinkgoT.Attr(key, value) +} +func (g *GinkgoTBWrapper) Output() io.Writer { + return g.GinkgoT.Output() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go b/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go new file mode 100644 index 0000000000..c965710205 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go @@ -0,0 +1,34 @@ +package internal + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +func ComputeAroundNodes(specs Specs) Specs { + out := Specs{} + for _, spec := range specs { + nodes := Nodes{} + currentNestingLevel := 0 + aroundNodes := types.AroundNodes{} + nestingLevelIndices := []int{} + for _, node := range spec.Nodes { + switch node.NodeType { + case types.NodeTypeContainer: + currentNestingLevel = node.NestingLevel + 1 + nestingLevelIndices = append(nestingLevelIndices, len(aroundNodes)) + aroundNodes = aroundNodes.Append(node.AroundNodes...) + nodes = append(nodes, node) + default: + if currentNestingLevel > node.NestingLevel { + currentNestingLevel = node.NestingLevel + aroundNodes = aroundNodes[:nestingLevelIndices[currentNestingLevel]] + } + node.AroundNodes = types.AroundNodes{}.Append(aroundNodes...).Append(node.AroundNodes...) + nodes = append(nodes, node) + } + } + spec.Nodes = nodes + out = append(out, spec) + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go index e3da7d14dd..a39daf5a60 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go @@ -56,7 +56,7 @@ This function sets the `Skip` property on specs by applying Ginkgo's focus polic *Note:* specs with pending nodes are Skipped when created by NewSpec. 
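The watch-dependency change above swaps two statically compiled regexps for plain substring checks; for fixed literal patterns, strings.Contains is simpler and avoids regexp-compilation cost. A tiny self-contained illustration of the replacement functions:

```go
package main

import (
	"fmt"
	"strings"
)

// matchesGinkgoOrGomega reports whether dir points into ginkgo or gomega,
// using substring checks instead of a compiled regexp.
func matchesGinkgoOrGomega(dir string) bool {
	return strings.Contains(dir, "github.com/onsi/ginkgo") ||
		strings.Contains(dir, "github.com/onsi/gomega")
}

func main() {
	fmt.Println(matchesGinkgoOrGomega("/go/src/github.com/onsi/gomega/matchers")) // true
	fmt.Println(matchesGinkgoOrGomega("/go/src/example.com/app"))                 // false
}
```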
*/ -func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) { +func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteConfig types.SuiteConfig) (Specs, bool) { focusString := strings.Join(suiteConfig.FocusStrings, "|") skipString := strings.Join(suiteConfig.SkipStrings, "|") @@ -84,6 +84,13 @@ func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suit }) } + if suiteConfig.SemVerFilter != "" { + semVerFilter, _ := types.ParseSemVerFilter(suiteConfig.SemVerFilter) + skipChecks = append(skipChecks, func(spec Spec) bool { + return !semVerFilter(UnionOfSemVerConstraints(suiteSemVerConstraints, spec.Nodes.UnionOfSemVerConstraints())) + }) + } + if len(suiteConfig.FocusFiles) > 0 { focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles) skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) }) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go index 02c9fe4fcd..cc794903e7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/group.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go @@ -110,21 +110,53 @@ func newGroup(suite *Suite) *group { } } +// initialReportForSpec constructs a new SpecReport right before running the spec. func (g *group) initialReportForSpec(spec Spec) types.SpecReport { return types.SpecReport{ - ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), - ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), - ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), - LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, - LeafNodeType: types.NodeTypeIt, - LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, - LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), - ParallelProcess: g.suite.config.ParallelProcess, - RunningInParallel: g.suite.isRunningInParallel(), - IsSerial: spec.Nodes.HasNodeMarkedSerial(), - IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), - MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), - MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(), + ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), + ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), + ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), + ContainerHierarchySemVerConstraints: spec.Nodes.WithType(types.NodeTypeContainer).SemVerConstraints(), + LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, + LeafNodeType: types.NodeTypeIt, + LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, + LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), + LeafNodeSemVerConstraints: []string(spec.FirstNodeWithType(types.NodeTypeIt).SemVerConstraints), + ParallelProcess: g.suite.config.ParallelProcess, + RunningInParallel: g.suite.isRunningInParallel(), + IsSerial: spec.Nodes.HasNodeMarkedSerial(), + IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), + MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), + MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(), + SpecPriority: spec.Nodes.GetSpecPriority(), + } +} + +// constructionNodeReportForTreeNode constructs a new ConstructionNodeReport right before
invoking the body +// of a container node during construction of the full tree. +func constructionNodeReportForTreeNode(node *TreeNode) *types.ConstructionNodeReport { + var report types.ConstructionNodeReport + // Walk up the tree and set attributes accordingly. + addNodeToReportForNode(&report, node) + return &report +} + +// addNodeToReportForNode is conceptually similar to initialReportForSpec and therefore placed here +// although it doesn't do anything with a group. +func addNodeToReportForNode(report *types.ConstructionNodeReport, node *TreeNode) { + if node.Parent != nil { + // First add the parent node, then the current one. + addNodeToReportForNode(report, node.Parent) + } + report.ContainerHierarchyTexts = append(report.ContainerHierarchyTexts, node.Node.Text) + report.ContainerHierarchyLocations = append(report.ContainerHierarchyLocations, node.Node.CodeLocation) + report.ContainerHierarchyLabels = append(report.ContainerHierarchyLabels, node.Node.Labels) + report.ContainerHierarchySemVerConstraints = append(report.ContainerHierarchySemVerConstraints, node.Node.SemVerConstraints) + if node.Node.MarkedSerial { + report.IsSerial = true + } + if node.Node.MarkedOrdered { + report.IsInOrderedContainer = true } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 8096950b6c..2bccec2dbf 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "reflect" + "slices" "sort" "sync" "time" @@ -46,20 +47,24 @@ type Node struct { ReportEachBody func(SpecContext, types.SpecReport) ReportSuiteBody func(SpecContext, types.Report) - MarkedFocus bool - MarkedPending bool - MarkedSerial bool - MarkedOrdered bool - MarkedContinueOnFailure bool - MarkedOncePerOrdered bool - FlakeAttempts int - MustPassRepeatedly int - Labels Labels - PollProgressAfter time.Duration - PollProgressInterval time.Duration - NodeTimeout time.Duration - SpecTimeout time.Duration - GracePeriod time.Duration + MarkedFocus bool + MarkedPending bool + MarkedSerial bool + MarkedOrdered bool + MarkedContinueOnFailure bool + MarkedOncePerOrdered bool + FlakeAttempts int + MustPassRepeatedly int + Labels Labels + SemVerConstraints SemVerConstraints + PollProgressAfter time.Duration + PollProgressInterval time.Duration + NodeTimeout time.Duration + SpecTimeout time.Duration + GracePeriod time.Duration + AroundNodes types.AroundNodes + HasExplicitlySetSpecPriority bool + SpecPriority int NodeIDWhereCleanupWasGenerated uint } @@ -85,31 +90,47 @@ type FlakeAttempts uint type MustPassRepeatedly uint type Offset uint type Done chan<- any // Deprecated Done Channel for asynchronous testing -type Labels []string type PollProgressInterval time.Duration type PollProgressAfter time.Duration type NodeTimeout time.Duration type SpecTimeout time.Duration type GracePeriod time.Duration +type SpecPriority int + +type Labels []string func (l Labels) MatchesLabelFilter(query string) bool { return types.MustParseLabelFilter(query)(l) } -func UnionOfLabels(labels ...Labels) Labels { - out := Labels{} - seen := map[string]bool{} - for _, labelSet := range labels { - for _, label := range labelSet { - if !seen[label] { - seen[label] = true - out = append(out, label) +type SemVerConstraints []string + +func (svc SemVerConstraints) MatchesSemVerFilter(version string) bool { + return types.MustParseSemVerFilter(version)(svc) +} + +func unionOf[S ~[]E, E comparable](slices ...S) S { + 
out := S{} + seen := map[E]bool{} + for _, slice := range slices { + for _, item := range slice { + if !seen[item] { + seen[item] = true + out = append(out, item) } } } return out } +func UnionOfLabels(labels ...Labels) Labels { + return unionOf(labels...) +} + +func UnionOfSemVerConstraints(semVerConstraints ...SemVerConstraints) SemVerConstraints { + return unionOf(semVerConstraints...) +} + func PartitionDecorations(args ...any) ([]any, []any) { decorations := []any{} remainingArgs := []any{} @@ -151,6 +172,8 @@ func isDecoration(arg any) bool { return true case t == reflect.TypeOf(Labels{}): return true + case t == reflect.TypeOf(SemVerConstraints{}): + return true case t == reflect.TypeOf(PollProgressInterval(0)): return true case t == reflect.TypeOf(PollProgressAfter(0)): @@ -161,6 +184,10 @@ func isDecoration(arg any) bool { return true case t == reflect.TypeOf(GracePeriod(0)): return true + case t == reflect.TypeOf(types.AroundNodeDecorator{}): + return true + case t == reflect.TypeOf(SpecPriority(0)): + return true case t.Kind() == reflect.Slice && isSliceOfDecorations(arg): return true default: @@ -191,6 +218,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy NodeType: nodeType, Text: text, Labels: Labels{}, + SemVerConstraints: SemVerConstraints{}, CodeLocation: types.NewCodeLocation(baseOffset), NestingLevel: -1, PollProgressAfter: -1, @@ -205,7 +233,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } - args = unrollInterfaceSlice(args) + args = UnrollInterfaceSlice(args) remainingArgs := []any{} // First get the CodeLocation up-to-date @@ -221,6 +249,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } labelsSeen := map[string]bool{} + semVerConstraintsSeen := map[string]bool{} trackedFunctionError := false args = remainingArgs remainingArgs = []any{} @@ -299,6 +328,14 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy if nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod")) } + case t == reflect.TypeOf(SpecPriority(0)): + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecPriority")) + } + node.SpecPriority = int(arg.(SpecPriority)) + node.HasExplicitlySetSpecPriority = true + case t == reflect.TypeOf(types.AroundNodeDecorator{}): + node.AroundNodes = append(node.AroundNodes, arg.(types.AroundNodeDecorator)) case t == reflect.TypeOf(Labels{}): if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label")) @@ -311,6 +348,18 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy appendError(err) } } + case t == reflect.TypeOf(SemVerConstraints{}): + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SemVerConstraint")) + } + for _, semVerConstraint := range arg.(SemVerConstraints) { + if !semVerConstraintsSeen[semVerConstraint] { + semVerConstraintsSeen[semVerConstraint] = true + semVerConstraint, err := types.ValidateAndCleanupSemVerConstraint(semVerConstraint, node.CodeLocation) + node.SemVerConstraints = append(node.SemVerConstraints, semVerConstraint) + appendError(err) + } + } case t.Kind() == reflect.Func: if nodeType.Is(types.NodeTypeContainer) { 
if node.Body != nil { @@ -599,7 +648,7 @@ func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(stri }) } - return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...) + return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs) } func (n Node) IsZero() bool { @@ -824,6 +873,32 @@ func (n Nodes) UnionOfLabels() []string { return out } +func (n Nodes) SemVerConstraints() [][]string { + out := make([][]string, len(n)) + for i := range n { + if n[i].SemVerConstraints == nil { + out[i] = []string{} + } else { + out[i] = []string(n[i].SemVerConstraints) + } + } + return out +} + +func (n Nodes) UnionOfSemVerConstraints() []string { + out := []string{} + seen := map[string]bool{} + for i := range n { + for _, constraint := range n[i].SemVerConstraints { + if !seen[constraint] { + seen[constraint] = true + out = append(out, constraint) + } + } + } + return out +} + func (n Nodes) CodeLocations() []types.CodeLocation { out := make([]types.CodeLocation, len(n)) for i := range n { @@ -920,7 +995,16 @@ func (n Nodes) GetMaxMustPassRepeatedly() int { return maxMustPassRepeatedly } -func unrollInterfaceSlice(args any) []any { +func (n Nodes) GetSpecPriority() int { + for i := len(n) - 1; i >= 0; i-- { + if n[i].HasExplicitlySetSpecPriority { + return n[i].SpecPriority + } + } + return 0 +} + +func UnrollInterfaceSlice(args any) []any { v := reflect.ValueOf(args) if v.Kind() != reflect.Slice { return []any{args} } @@ -928,11 +1012,67 @@ out := []any{} for i := 0; i < v.Len(); i++ { el := reflect.ValueOf(v.Index(i).Interface()) - if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) { - out = append(out, unrollInterfaceSlice(el.Interface())...) + if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) && el.Type() != reflect.TypeOf(SemVerConstraints{}) { + out = append(out, UnrollInterfaceSlice(el.Interface())...) } else { out = append(out, v.Index(i).Interface()) } } return out } + +type NodeArgsTransformer func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error) + +func AddTreeConstructionNodeArgsTransformer(transformer NodeArgsTransformer) func() { + id := nodeArgsTransformerCounter + nodeArgsTransformerCounter++ + nodeArgsTransformers = append(nodeArgsTransformers, registeredNodeArgsTransformer{id, transformer}) + return func() { + nodeArgsTransformers = slices.DeleteFunc(nodeArgsTransformers, func(transformer registeredNodeArgsTransformer) bool { + return transformer.id == id + }) + } +} + +var ( + nodeArgsTransformerCounter int64 + nodeArgsTransformers []registeredNodeArgsTransformer +) + +type registeredNodeArgsTransformer struct { + id int64 + transformer NodeArgsTransformer +} + +// TransformNewNodeArgs is the helper for DSL functions which handles NodeArgsTransformers. +// +// Its return values are intentionally the same as the internal.NewNode parameters, +// which makes it possible to chain the invocations: +// +// NewNode(TransformNewNodeArgs(...)) +func TransformNewNodeArgs(exitIfErrors func([]error), deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...any) (*types.DeprecationTracker, types.NodeType, string, []any) { + var errs []error + + // Most recent first... + // + // This intentionally doesn't use slices.Backward because + // using iterators influences stack unwinding.
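// For illustration, a minimal sketch of registering a transformer (the
// registration API above is real; the transformer body and the use of the
// DSL's Label decorator are assumed):
//
//	cleanup := AddTreeConstructionNodeArgsTransformer(func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error) {
//		return text, append(args, Label("transformed")), nil // appends a hypothetical label to every node
//	})
//	defer cleanup() // deregisters the transformer again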
+ for i := len(nodeArgsTransformers) - 1; i >= 0; i-- { + transformer := nodeArgsTransformers[i].transformer + args = UnrollInterfaceSlice(args) + + // We do not really need to recompute this on additional loop iterations, + but it's fast and simpler this way. + var offset Offset + for _, arg := range args { + if o, ok := arg.(Offset); ok { + offset = o + } + } + offset += 3 // The DSL function, this helper, and the TransformNodeArgs implementation. + + text, args, errs = transformer(nodeType, offset, text, args) + exitIfErrors(errs) + } + return deprecationTracker, nodeType, text, args +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go index 84eea0a59e..da58d54f95 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go @@ -125,7 +125,7 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, // pick out a representative spec representativeSpec := specs[executionGroups[groupID][0]] - // and grab the node on the spec that will represent which shufflable group this execution group belongs tu + // and grab the node on the spec that will represent which shufflable group this execution group belongs to shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle) //add the execution group to its shufflable group @@ -138,14 +138,35 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, } } + // now, for each shufflable group, we compute the priority + shufflableGroupingIDPriorities := map[uint]int{} + for shufflableGroupingID, groupIDs := range shufflableGroupingIDToGroupIDs { + // the priority of a shufflable grouping is the max priority of any spec in any execution group in the shufflable grouping + maxPriority := -1 << 31 // min int + for _, groupID := range groupIDs { + for _, specIdx := range executionGroups[groupID] { + specPriority := specs[specIdx].Nodes.GetSpecPriority() + maxPriority = max(specPriority, maxPriority) + } + } + shufflableGroupingIDPriorities[shufflableGroupingID] = maxPriority + } + // now we permute the sorted shufflable grouping IDs and build the ordered Groups - orderedGroups := GroupedSpecIndices{} permutation := r.Perm(len(shufflableGroupingIDs)) - for _, j := range permutation { - //let's get the execution group IDs for this shufflable group: - executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]] - // and we'll add their associated specindices to the orderedGroups slice: - for _, executionGroupID := range executionGroupIDsForJ { + shuffledGroupingIds := make([]uint, len(shufflableGroupingIDs)) + for i, j := range permutation { + shuffledGroupingIds[i] = shufflableGroupingIDs[j] + } + // now, we need to stable sort the shuffledGroupingIds by priority (higher priority first) + sort.SliceStable(shuffledGroupingIds, func(i, j int) bool { + return shufflableGroupingIDPriorities[shuffledGroupingIds[i]] > shufflableGroupingIDPriorities[shuffledGroupingIds[j]] + }) + + // we can now take these prioritized, shuffled, groupings and form the final set of ordered spec groups + orderedGroups := GroupedSpecIndices{} + for _, id := range shuffledGroupingIds { + for _, executionGroupID := range shufflableGroupingIDToGroupIDs[id] { orderedGroups = append(orderedGroups, executionGroups[executionGroupID]) } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go index 11269cf1f2..165cbc4b67 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go @@ -236,7 +236,7 @@ func extractRunningGoroutines() ([]types.Goroutine, error) { } functionCall.Filename = line[:delimiterIdx] line = strings.Split(line[delimiterIdx+1:], " ")[0] - lineNumber, err := strconv.ParseInt(line, 10, 64) + lineNumber, err := strconv.ParseInt(line, 10, 32) functionCall.Line = int(lineNumber) if err != nil { return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error())) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go new file mode 100644 index 0000000000..8b7a9ceabf --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go @@ -0,0 +1,158 @@ +package reporters + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/go/packages" +) + +func ptr[T any](in T) *T { + return &in +} + +type encoder interface { + Encode(v any) error +} + +// gojsonEvent matches the format from go internals +// https://github.com/golang/go/blob/master/src/cmd/internal/test2json/test2json.go#L31-L41 +// https://pkg.go.dev/cmd/test2json +type gojsonEvent struct { + Time *time.Time `json:",omitempty"` + Action GoJSONAction + Package string `json:",omitempty"` + Test string `json:",omitempty"` + Elapsed *float64 `json:",omitempty"` + Output *string `json:",omitempty"` + FailedBuild string `json:",omitempty"` +} + +type GoJSONAction string + +const ( + // start - the test binary is about to be executed + GoJSONStart GoJSONAction = "start" + // run - the test has started running + GoJSONRun GoJSONAction = "run" + // pause - the test has been paused + GoJSONPause GoJSONAction = "pause" + // cont - the test has continued running + GoJSONCont GoJSONAction = "cont" + // pass - the test passed + GoJSONPass GoJSONAction = "pass" + // bench - the benchmark printed log output but did not fail + GoJSONBench GoJSONAction = "bench" + // fail - the test or benchmark failed + GoJSONFail GoJSONAction = "fail" + // output - the test printed output + GoJSONOutput GoJSONAction = "output" + // skip - the test was skipped or the package contained no tests + GoJSONSkip GoJSONAction = "skip" +) + +func goJSONActionFromSpecState(state types.SpecState) GoJSONAction { + switch state { + case types.SpecStateInvalid: + return GoJSONFail + case types.SpecStatePending: + return GoJSONSkip + case types.SpecStateSkipped: + return GoJSONSkip + case types.SpecStatePassed: + return GoJSONPass + case types.SpecStateFailed: + return GoJSONFail + case types.SpecStateAborted: + return GoJSONFail + case types.SpecStatePanicked: + return GoJSONFail + case types.SpecStateInterrupted: + return GoJSONFail + case types.SpecStateTimedout: + return GoJSONFail + default: + panic("unexpected state should not happen") + } +} + +// gojsonReport wraps types.Report and calculates extra fields required by gojson +type gojsonReport struct { + o types.Report + // Extra calculated fields + goPkg string + elapsed float64 +} + +func newReport(in types.Report) *gojsonReport { + return &gojsonReport{ + o: in, + } +} + +func (r *gojsonReport) Fill() error { + // NOTE: could the types.Report include the go package name?
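// Illustrative note (the paths and import path below are hypothetical):
// suitePathToPkg resolves the suite's directory to its Go import path via
// golang.org/x/tools/go/packages, so a suite rooted at /home/user/project/pkg/foo
// would yield something like "example.com/project/pkg/foo" for the Package field.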
+ goPkg, err := suitePathToPkg(r.o.SuitePath) + if err != nil { + return err + } + r.goPkg = goPkg + r.elapsed = r.o.RunTime.Seconds() + return nil +} + +// gojsonSpecReport wraps types.SpecReport and calculates extra fields required by gojson +type gojsonSpecReport struct { + o types.SpecReport + // extra calculated fields + testName string + elapsed float64 + action GoJSONAction +} + +func newSpecReport(in types.SpecReport) *gojsonSpecReport { + return &gojsonSpecReport{ + o: in, + } +} + +func (sr *gojsonSpecReport) Fill() error { + sr.elapsed = sr.o.RunTime.Seconds() + sr.testName = createTestName(sr.o) + sr.action = goJSONActionFromSpecState(sr.o.State) + return nil +} + +func suitePathToPkg(dir string) (string, error) { + cfg := &packages.Config{ + Mode: packages.NeedFiles | packages.NeedSyntax, + } + pkgs, err := packages.Load(cfg, dir) + if err != nil { + return "", err + } + if len(pkgs) != 1 { + return "", errors.New("error") + } + return pkgs[0].ID, nil +} + +func createTestName(spec types.SpecReport) string { + name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } + name = strings.TrimSpace(name) + return name +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go new file mode 100644 index 0000000000..ec5311d069 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go @@ -0,0 +1,111 @@ +package reporters + +type GoJSONEventWriter struct { + enc encoder + specSystemErrFn specSystemExtractFn + specSystemOutFn specSystemExtractFn +} + +func NewGoJSONEventWriter(enc encoder, errFn specSystemExtractFn, outFn specSystemExtractFn) *GoJSONEventWriter { + return &GoJSONEventWriter{ + enc: enc, + specSystemErrFn: errFn, + specSystemOutFn: outFn, + } +} + +func (r *GoJSONEventWriter) writeEvent(e *gojsonEvent) error { + return r.enc.Encode(e) +} + +func (r *GoJSONEventWriter) WriteSuiteStart(report *gojsonReport) error { + e := &gojsonEvent{ + Time: &report.o.StartTime, + Action: GoJSONStart, + Package: report.goPkg, + Output: nil, + FailedBuild: "", + } + return r.writeEvent(e) +} + +func (r *GoJSONEventWriter) WriteSuiteResult(report *gojsonReport) error { + var action GoJSONAction + switch { + case report.o.PreRunStats.SpecsThatWillRun == 0: + action = GoJSONSkip + case report.o.SuiteSucceeded: + action = GoJSONPass + default: + action = GoJSONFail + } + e := &gojsonEvent{ + Time: &report.o.EndTime, + Action: action, + Package: report.goPkg, + Output: nil, + FailedBuild: "", + Elapsed: ptr(report.elapsed), + } + return r.writeEvent(e) +} + +func (r *GoJSONEventWriter) WriteSpecStart(report *gojsonReport, specReport *gojsonSpecReport) error { + e := &gojsonEvent{ + Time: &specReport.o.StartTime, + Action: GoJSONRun, + Test: specReport.testName, + Package: report.goPkg, + Output: nil, + FailedBuild: "", + } + return r.writeEvent(e) +} + +func (r *GoJSONEventWriter) WriteSpecOut(report *gojsonReport, specReport *gojsonSpecReport) error { + events := []*gojsonEvent{} + + stdErr := r.specSystemErrFn(specReport.o) + if stdErr != "" { + events = append(events, &gojsonEvent{ + Time: &specReport.o.EndTime, + Action: 
GoJSONOutput, + Test: specReport.testName, + Package: report.goPkg, + Output: ptr(stdErr), + FailedBuild: "", + }) + } + stdOut := r.specSystemOutFn(specReport.o) + if stdOut != "" { + events = append(events, &gojsonEvent{ + Time: &specReport.o.EndTime, + Action: GoJSONOutput, + Test: specReport.testName, + Package: report.goPkg, + Output: ptr(stdOut), + FailedBuild: "", + }) + } + + for _, ev := range events { + err := r.writeEvent(ev) + if err != nil { + return err + } + } + return nil +} + +func (r *GoJSONEventWriter) WriteSpecResult(report *gojsonReport, specReport *gojsonSpecReport) error { + e := &gojsonEvent{ + Time: &specReport.o.EndTime, + Action: specReport.action, + Test: specReport.testName, + Package: report.goPkg, + Elapsed: ptr(specReport.elapsed), + Output: nil, + FailedBuild: "", + } + return r.writeEvent(e) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go new file mode 100644 index 0000000000..633e49b88d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go @@ -0,0 +1,45 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +type GoJSONReporter struct { + ev *GoJSONEventWriter +} + +type specSystemExtractFn func(spec types.SpecReport) string + +func NewGoJSONReporter(enc encoder, errFn specSystemExtractFn, outFn specSystemExtractFn) *GoJSONReporter { + return &GoJSONReporter{ + ev: NewGoJSONEventWriter(enc, errFn, outFn), + } +} + +func (r *GoJSONReporter) Write(originalReport types.Report) error { + // suite start events + report := newReport(originalReport) + err := report.Fill() + if err != nil { + return err + } + r.ev.WriteSuiteStart(report) + for _, originalSpecReport := range originalReport.SpecReports { + specReport := newSpecReport(originalSpecReport) + err := specReport.Fill() + if err != nil { + return err + } + if specReport.o.LeafNodeType == types.NodeTypeIt { + // handle any It leaf node as a spec + r.ev.WriteSpecStart(report, specReport) + r.ev.WriteSpecOut(report, specReport) + r.ev.WriteSpecResult(report, specReport) + } else { + // handle any other leaf node as generic output + r.ev.WriteSpecOut(report, specReport) + } + } + r.ev.WriteSuiteResult(report) + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go index 2d2ea2fc35..99c9c5f5be 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -2,6 +2,7 @@ package internal import ( "context" + "reflect" "github.com/onsi/ginkgo/v2/types" ) @@ -11,6 +12,7 @@ type SpecContext interface { SpecReport() types.SpecReport AttachProgressReporter(func() string) func() + WrappedContext() context.Context } type specContext struct { @@ -45,3 +47,28 @@ func NewSpecContext(suite *Suite) *specContext { func (sc *specContext) SpecReport() types.SpecReport { return sc.suite.CurrentSpecReport() } + +func (sc *specContext) WrappedContext() context.Context { + return sc.Context +} + +/* +The user is allowed to wrap `SpecContext` in a new context.Context when using AroundNodes. But body functions expect SpecContext. +We support this by taking their context.Context and returning a SpecContext that wraps it.
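For example (an illustrative sketch; myKey and myValue are hypothetical), an AroundNode may derive a new context before invoking the body:

	AroundNode(func(ctx context.Context) context.Context {
		return context.WithValue(ctx, myKey, myValue)
	}, cl)

Because the derived context still reaches the original specContext through its Value chain, wrapContextChain below can recover it via the "GINKGO_SPEC_CONTEXT" key and rewrap it.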
+*/ +func wrapContextChain(ctx context.Context) SpecContext { + if ctx == nil { + return nil + } + if reflect.TypeOf(ctx) == reflect.TypeOf(&specContext{}) { + return ctx.(*specContext) + } else if sc, ok := ctx.Value("GINKGO_SPEC_CONTEXT").(*specContext); ok { + return &specContext{ + Context: ctx, + ProgressReporterManager: sc.ProgressReporterManager, + cancel: sc.cancel, + suite: sc.suite, + } + } + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 3edf507765..ef76cd099e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -32,6 +32,7 @@ type Suite struct { suiteNodes Nodes cleanupNodes Nodes + aroundNodes types.AroundNodes failer *Failer reporter reporters.Reporter @@ -41,6 +42,8 @@ type Suite struct { config types.SuiteConfig deadline time.Time + currentConstructionNodeReport *types.ConstructionNodeReport + skipAll bool report types.Report currentSpecReport types.SpecReport @@ -87,6 +90,7 @@ func (suite *Suite) Clone() (*Suite, error) { ProgressReporterManager: NewProgressReporterManager(), topLevelContainers: suite.topLevelContainers.Clone(), suiteNodes: suite.suiteNodes.Clone(), + aroundNodes: suite.aroundNodes.Clone(), selectiveLock: &sync.Mutex{}, }, nil } @@ -104,13 +108,14 @@ func (suite *Suite) BuildTree() error { return nil } -func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { +func (suite *Suite) Run(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteAroundNodes types.AroundNodes, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { if suite.phase != PhaseBuildTree { panic("cannot run before building the tree = call suite.BuildTree() first") } ApplyNestedFocusPolicyToTree(suite.tree) specs := GenerateSpecsFromTreeRoot(suite.tree) - specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig) + specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteSemVerConstraints, suiteConfig) + specs = ComputeAroundNodes(specs) suite.phase = PhaseRun suite.client = client @@ -120,6 +125,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string suite.outputInterceptor = outputInterceptor suite.interruptHandler = interruptHandler suite.config = suiteConfig + suite.aroundNodes = suiteAroundNodes if suite.config.Timeout > 0 { suite.deadline = time.Now().Add(suite.config.Timeout) @@ -127,7 +133,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal) - success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs) + success := suite.runSpecs(description, suiteLabels, suiteSemVerConstraints, suitePath, hasProgrammaticFocus, specs) cancelProgressHandler() @@ -199,6 +205,14 @@ func (suite *Suite) 
PushNode(node Node) error { err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation) } }() + + // Ensure that code running in the body of the container node + // has access to information about the current container node(s). + suite.currentConstructionNodeReport = constructionNodeReportForTreeNode(suite.tree) + defer func() { + suite.currentConstructionNodeReport = nil + }() + node.Body(nil) return err }() @@ -259,6 +273,7 @@ func (suite *Suite) pushCleanupNode(node Node) error { node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID node.NestingLevel = suite.currentNode.NestingLevel + node.AroundNodes = types.AroundNodes{}.Append(suite.currentNode.AroundNodes...).Append(node.AroundNodes...) suite.selectiveLock.Lock() suite.cleanupNodes = append(suite.cleanupNodes, node) suite.selectiveLock.Unlock() @@ -327,6 +342,16 @@ func (suite *Suite) By(text string, callback ...func()) error { return nil } +func (suite *Suite) CurrentConstructionNodeReport() types.ConstructionNodeReport { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + report := suite.currentConstructionNodeReport + if report == nil { + panic("CurrentConstructionNodeReport may only be called during construction of the spec tree") + } + return *report +} + /* Spec Running methods - used during PhaseRun */ @@ -428,13 +453,14 @@ func (suite *Suite) processCurrentSpecReport() { } } -func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { +func (suite *Suite) runSpecs(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { numSpecsThatWillBeRun := specs.CountWithoutSkip() suite.report = types.Report{ SuitePath: suitePath, SuiteDescription: description, SuiteLabels: suiteLabels, + SuiteSemVerConstraints: suiteSemVerConstraints, SuiteConfig: suite.config, SuiteHasProgrammaticFocus: hasProgrammaticFocus, PreRunStats: types.PreRunStats{ @@ -891,7 +917,30 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ failureC <- failureFromRun }() - node.Body(sc) + aroundNodes := types.AroundNodes{}.Append(suite.aroundNodes...).Append(node.AroundNodes...) + if len(aroundNodes) > 0 { + i := 0 + var f func(context.Context) + f = func(c context.Context) { + sc := wrapContextChain(c) + if sc == nil { + suite.failer.Fail("An AroundNode failed to pass a valid Ginkgo SpecContext in. 
You must always pass in a context derived from the context passed to you.", aroundNodes[i].CodeLocation) + return + } + i++ + if i < len(aroundNodes) { + aroundNodes[i].Body(sc, f) + } else { + node.Body(sc) + } + } + aroundNodes[0].Body(sc, f) + if i != len(aroundNodes) { + suite.failer.Fail("An AroundNode failed to call the passed in function.", aroundNodes[i].CodeLocation) + } + } else { + node.Body(sc) + } finished = true }() diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index b4ecc7cb83..9806e315a6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -229,3 +229,9 @@ func (t *ginkgoTestingTProxy) ParallelTotal() int { func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() { return t.attachProgressReporter(f) } +func (t *ginkgoTestingTProxy) Output() io.Writer { + return t.writer +} +func (t *ginkgoTestingTProxy) Attr(key, value string) { + t.addReportEntry(key, value, internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 74ad0768b7..026d9cf9b3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -72,6 +72,9 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { if len(report.SuiteLabels) > 0 { r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) } + if len(report.SuiteSemVerConstraints) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteSemVerConstraints, ", "))) + } r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) if report.SuiteConfig.ParallelTotal > 1 { r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) @@ -87,6 +90,13 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { bannerWidth = len(labels) + 2 } } + if len(report.SuiteSemVerConstraints) > 0 { + semVerConstraints := strings.Join(report.SuiteSemVerConstraints, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", semVerConstraints)) + if len(semVerConstraints)+2 > bannerWidth { + bannerWidth = len(semVerConstraints) + 2 + } + } r.emitBlock(strings.Repeat("=", bannerWidth)) out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) @@ -371,13 +381,22 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim cursor := 0 for _, entry := range timeline { tl := entry.GetTimelineLocation() - if tl.Offset < len(gw) { - r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset])) - cursor = tl.Offset - } else if cursor < len(gw) { + + end := tl.Offset + if end > len(gw) { + end = len(gw) + } + if end < cursor { + end = cursor + } + if cursor < end && cursor <= len(gw) && end <= len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:end])) + cursor = end + } else if cursor < len(gw) && end == len(gw) { r.emit(r.fi(indent, "%s", gw[cursor:])) cursor = len(gw) } + switch x := entry.(type) { case types.Failure: if isVeryVerbose { @@ -394,7 +413,7 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim case types.ReportEntry: r.emitReportEntry(indent, x) case types.ProgressReport: - r.emitProgressReport(indent, false, x) + r.emitProgressReport(indent, false, 
isVeryVerbose, x) case types.SpecEvent: if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { r.emitSpecEvent(indent, x, isVeryVerbose) @@ -448,7 +467,7 @@ func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failur if !failure.ProgressReport.IsZero() { r.emitBlock("\n") - r.emitProgressReport(indent, false, failure.ProgressReport) + r.emitProgressReport(indent, false, false, failure.ProgressReport) } if failure.AdditionalFailure != nil && includeAdditionalFailure { @@ -464,11 +483,11 @@ func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) } shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) - r.emitProgressReport(1, shouldEmitGW, report) + r.emitProgressReport(1, shouldEmitGW, true, report) r.emitDelimiter(1) } -func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput, emitGroup bool, report types.ProgressReport) { if report.Message != "" { r.emitBlock(r.fi(indent, report.Message+"\n")) indent += 1 @@ -504,6 +523,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput indent -= 1 } + if r.conf.GithubOutput && emitGroup { + r.emitBlock(r.fi(indent, "::group::Progress Report")) + } + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { r.emit("\n") r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) @@ -550,6 +573,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) r.emitGoroutines(indent, otherGoroutines...) } + + if r.conf.GithubOutput && emitGroup { + r.emitBlock(r.fi(indent, "::endgroup::")) + } } func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { @@ -698,8 +725,8 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { } func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { - texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} - texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) + texts, locations, labels, semVerConstraints := []string{}, []types.CodeLocation{}, [][]string{}, [][]string{} + texts, locations, labels, semVerConstraints = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...), append(semVerConstraints, report.ContainerHierarchySemVerConstraints...) 
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) @@ -707,6 +734,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo texts = append(texts, r.f(report.LeafNodeText)) } labels = append(labels, report.LeafNodeLabels) + semVerConstraints = append(semVerConstraints, report.LeafNodeSemVerConstraints) locations = append(locations, report.LeafNodeLocation) failureLocation := report.Failure.FailureNodeLocation @@ -720,6 +748,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...) locations = append([]types.CodeLocation{failureLocation}, locations...) labels = append([][]string{{}}, labels...) + semVerConstraints = append([][]string{{}}, semVerConstraints...) highlightIndex = 0 case types.FailureNodeInContainer: i := report.Failure.FailureNodeContainerIndex @@ -747,6 +776,9 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo if len(labels[i]) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) } + if len(semVerConstraints[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(semVerConstraints[i], ", ")) + } out += "\n" out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) } @@ -770,6 +802,10 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo if len(flattenedLabels) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) } + flattenedSemVerConstraints := report.SemVerConstraints() + if len(flattenedSemVerConstraints) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedSemVerConstraints, ", ")) + } out += "\n" if usePreciseFailureLocation { out += r.f("{{gray}}%s{{/}}", failureLocation) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go new file mode 100644 index 0000000000..d02fb7a1ae --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go @@ -0,0 +1,61 @@ +package reporters + +import ( + "encoding/json" + "fmt" + "os" + "path" + + "github.com/onsi/ginkgo/v2/internal/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +// GenerateGoTestJSONReport produces a JSON-formatted report in the test2json format used by `go test -json` +func GenerateGoTestJSONReport(report types.Report, destination string) error { + // walk report and generate test2json-compatible objects + // JSON-encode the objects into filename + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return err + } + f, err := os.Create(destination) + if err != nil { + return err + } + defer f.Close() + enc := json.NewEncoder(f) + r := reporters.NewGoJSONReporter( + enc, + systemErrForUnstructuredReporters, + systemOutForUnstructuredReporters, + ) + return r.Write(report) } + +// MergeAndCleanupGoTestJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources +// It skips over reports that fail to read but reports on them via the returned messages []string +func MergeAndCleanupGoTestJSONReports(sources []string, destination string) ([]string, error) { + messages := []string{} + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return messages, err + } + f, err := os.Create(destination) + if err != nil { + return messages, err + } + defer f.Close() + + for _, source := range
sources { + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + _, err = f.Write(data) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not write to %s:\n%s", destination, err.Error())) + continue + } + os.Remove(source) + } + return messages, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 562e0f62ba..828f893fb8 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -36,6 +36,9 @@ type JunitReportConfig struct { // Enable OmitSpecLabels to prevent labels from appearing in the spec name OmitSpecLabels bool + // Enable OmitSpecSemVerConstraints to prevent semantic version constraints from appearing in the spec name + OmitSpecSemVerConstraints bool + // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name OmitLeafNodeType bool @@ -169,9 +172,11 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, + {"SuiteSemVerConstraints", fmt.Sprintf("[%s]", strings.Join(report.SuiteSemVerConstraints, ","))}, {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)}, {"LabelFilter", report.SuiteConfig.LabelFilter}, + {"SemVerFilter", report.SuiteConfig.SemVerFilter}, {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, @@ -207,6 +212,10 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit owner = matches[1] } } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 && !config.OmitSpecSemVerConstraints { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } name = strings.TrimSpace(name) test := JUnitTestCase{ diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index e990ad82e1..55e1d1f4f7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -38,9 +38,13 @@ func GenerateTeamcityReport(report types.Report, dst string) error { name := report.SuiteDescription labels := report.SuiteLabels + semVerConstraints := report.SuiteSemVerConstraints if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) for _, spec := range report.SpecReports { name := fmt.Sprintf("[%s]", spec.LeafNodeType) @@ -51,6 +55,10 @@ func GenerateTeamcityReport(report types.Report, dst string) error { if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } name = tcEscape(name) 
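// For illustration (a hypothetical spec): a spec "works" with label "fast" and
// constraint ">= 2.1.0" is announced roughly as
// ##teamcity[testStarted name='|[It|] works |[fast|] |[>= 2.1.0|]']
// since tcEscape escapes brackets, pipes, quotes, and newlines.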
fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index 5bf2e62e90..4e86dba84d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -27,6 +27,8 @@ CurrentSpecReport returns information about the current running spec. The returned object is a types.SpecReport which includes helper methods to make extracting information about the spec easier. +During construction of the test tree the result is empty. + You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec */ @@ -34,6 +36,31 @@ func CurrentSpecReport() SpecReport { return global.Suite.CurrentSpecReport() } +/* +ConstructionNodeReport describes the container nodes during construction of +the spec tree. It provides a subset of the information that is provided +by SpecReport at runtime. + +It is documented here: [types.ConstructionNodeReport] +*/ +type ConstructionNodeReport = types.ConstructionNodeReport + +/* +CurrentTreeConstructionNodeReport returns information about the current container nodes +that are leading to the current path in the spec tree. +The returned object is a types.ConstructionNodeReport which includes helper methods +to make extracting information about the spec easier. + +May only be called during construction of the spec tree. It panics when +called while tests are running. Use CurrentSpecReport instead in that +phase. + +You can learn more about ConstructionNodeReport here: [types.ConstructionNodeReport] +*/ +func CurrentTreeConstructionNodeReport() ConstructionNodeReport { + return global.Suite.CurrentConstructionNodeReport() +} + /* ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter @@ -92,7 +119,7 @@ func ReportBeforeEach(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...))) } /* @@ -116,7 +143,7 @@ func ReportAfterEach(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))) } /* @@ -145,7 +172,7 @@ You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spe func ReportBeforeSuite(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))) } /* @@ -165,7 +192,7 @@ ReportAfterSuite nodes must be created at the top-level (i.e.
not nested in a Container node). When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across all parallel nodes -In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags. +In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, GoJSON, JUnit, and Teamcity formatted reports using the --json-report, --gojson-report, --junit-report, and --teamcity-report ginkgo CLI flags. You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically @@ -177,7 +204,7 @@ You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spe func ReportAfterSuite(text string, body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))) } func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) { @@ -188,6 +215,12 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error())) } } + if reporterConfig.GoJSONReport != "" { + err := reporters.GenerateGoTestJSONReport(report, reporterConfig.GoJSONReport) + if err != nil { + Fail(fmt.Sprintf("Failed to generate Go JSON report:\n%s", err.Error())) + } + } if reporterConfig.JUnitReport != "" { err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport) if err != nil { @@ -206,6 +239,9 @@ if reporterConfig.JSONReport != "" { flags = append(flags, "--json-report") } + if reporterConfig.GoJSONReport != "" { + flags = append(flags, "--gojson-report") + } if reporterConfig.JUnitReport != "" { flags = append(flags, "--junit-report") } @@ -213,9 +249,11 @@ flags = append(flags, "--teamcity-report") } pushNode(internal.NewNode( - deprecationTracker, types.NodeTypeReportAfterSuite, - fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")), - body, - types.NewCustomCodeLocation("autogenerated by Ginkgo"), + internal.TransformNewNodeArgs( + exitIfErrors, deprecationTracker, types.NodeTypeReportAfterSuite, + fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")), + body, + types.NewCustomCodeLocation("autogenerated by Ginkgo"), + ), )) } diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index b9e0ca9ef7..1031aa8554 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -309,11 +309,11 @@ func generateTable(description string, isSubtree bool, args ...any) { internalNodeType = types.NodeTypeContainer } - pushNode(internal.NewNode(deprecationTracker, internalNodeType, description, internalNodeArgs...)) + pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors,
deprecationTracker, internalNodeType, description, internalNodeArgs...))) } }) - pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...)) + pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...))) } func invokeFunction(function any, parameters []any) []reflect.Value { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/around_node.go b/vendor/github.com/onsi/ginkgo/v2/types/around_node.go new file mode 100644 index 0000000000..a069e0623d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/around_node.go @@ -0,0 +1,56 @@ +package types + +import ( + "context" +) + +type AroundNodeAllowedFuncs interface { + ~func(context.Context, func(context.Context)) | ~func(context.Context) context.Context | ~func() +} +type AroundNodeFunc func(ctx context.Context, body func(ctx context.Context)) + +func AroundNode[F AroundNodeAllowedFuncs](f F, cl CodeLocation) AroundNodeDecorator { + if f == nil { + panic("AroundNode cannot be called with a nil function.") + } + var aroundNodeFunc func(context.Context, func(context.Context)) + switch x := any(f).(type) { + case func(context.Context, func(context.Context)): + aroundNodeFunc = x + case func(context.Context) context.Context: + aroundNodeFunc = func(ctx context.Context, body func(context.Context)) { + ctx = x(ctx) + body(ctx) + } + case func(): + aroundNodeFunc = func(ctx context.Context, body func(context.Context)) { + x() + body(ctx) + } + } + + return AroundNodeDecorator{ + Body: aroundNodeFunc, + CodeLocation: cl, + } +} + +type AroundNodeDecorator struct { + Body AroundNodeFunc + CodeLocation CodeLocation +} + +type AroundNodes []AroundNodeDecorator + +func (an AroundNodes) Clone() AroundNodes { + out := make(AroundNodes, len(an)) + copy(out, an) + return out +} + +func (an AroundNodes) Append(other ...AroundNodeDecorator) AroundNodes { + out := make(AroundNodes, len(an)+len(other)) + copy(out, an) + copy(out[len(an):], other) + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 2e827efe30..f847036046 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -24,6 +24,7 @@ type SuiteConfig struct { FocusFiles []string SkipFiles []string LabelFilter string + SemVerFilter string FailOnPending bool FailOnEmpty bool FailFast bool @@ -95,6 +96,7 @@ type ReporterConfig struct { ForceNewlines bool JSONReport string + GoJSONReport string JUnitReport string TeamcityReport string } @@ -111,7 +113,7 @@ func (rc ReporterConfig) Verbosity() VerbosityLevel { } func (rc ReporterConfig) WillGenerateReport() bool { - return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" + return rc.JSONReport != "" || rc.GoJSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" } func NewDefaultReporterConfig() ReporterConfig { @@ -308,6 +310,8 @@ var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g.
'(cat || dog) && !fruit'"}, + {KeyPath: "S.SemVerFilter", Name: "sem-ver-filter", SectionKey: "filter", UsageArgument: "version", + Usage: "If set, ginkgo will only run specs with semantic version constraints that are satisfied by the provided version. e.g. '2.1.0'"}, {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", @@ -356,6 +360,8 @@ var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, + {KeyPath: "R.GoJSONReport", Name: "gojson-report", UsageArgument: "filename.json", SectionKey: "output", + Usage: "If set, Ginkgo will generate a Go JSON-formatted test report at the specified location."}, {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure", Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."}, {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output", @@ -443,6 +449,13 @@ func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig Re } } + if suiteConfig.SemVerFilter != "" { + _, err := ParseSemVerFilter(suiteConfig.SemVerFilter) + if err != nil { + errors = append(errors, err) + } + } + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { case "", "dup", "swap", "none": default: @@ -573,6 +586,9 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, +} + +var GoBuildOFlags = GinkgoFlags{ {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", Usage: "output binary path (including name)."}, } @@ -673,7 +689,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( - GoBuildFlags, + GoBuildFlags.CopyAppend(GoBuildOFlags...), map[string]any{ "Go": &goFlagsConfig, }, @@ -763,6 +779,7 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { flags := GinkgoCLISharedFlags flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoBuildOFlags...) 
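// GoBuildOFlags is registered separately from GoBuildFlags (here and in
// GenerateGoTestCompileArgs above) so that -o is, presumably, only exposed by
// the commands that actually emit a binary; other consumers of GoBuildFlags
// do not pick it up.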
bindings := map[string]any{ "C": cliConfig, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index c2796b5490..59313238cf 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -432,6 +432,24 @@ func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { } } +func (g ginkgoErrors) InvalidSemVerConstraint(semVerConstraint, errMsg string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid SemVerConstraint", + Message: fmt.Sprintf("'%s' is an invalid SemVerConstraint: %s", semVerConstraint, errMsg), + CodeLocation: cl, + DocLink: "spec-semantic-version-filtering", + } +} + +func (g ginkgoErrors) InvalidEmptySemVerConstraint(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty SemVerConstraint", + Message: "SemVerConstraint cannot be empty", + CodeLocation: cl, + DocLink: "spec-semantic-version-filtering", + } +} + /* Table errors */ func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { return GinkgoError{ diff --git a/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go new file mode 100644 index 0000000000..3fc2ed144b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go @@ -0,0 +1,60 @@ +package types + +import ( + "fmt" + + "github.com/Masterminds/semver/v3" +) + +type SemVerFilter func([]string) bool + +func MustParseSemVerFilter(input string) SemVerFilter { + filter, err := ParseSemVerFilter(input) + if err != nil { + panic(err) + } + return filter +} + +func ParseSemVerFilter(filterVersion string) (SemVerFilter, error) { + if filterVersion == "" { + return func(_ []string) bool { return true }, nil + } + + targetVersion, err := semver.NewVersion(filterVersion) + if err != nil { + return nil, fmt.Errorf("invalid filter version: %w", err) + } + + return func(constraints []string) bool { + // unconstrained specs always run + if len(constraints) == 0 { + return true + } + + for _, constraintStr := range constraints { + constraint, err := semver.NewConstraint(constraintStr) + if err != nil { + return false + } + + if !constraint.Check(targetVersion) { + return false + } + } + + return true + }, nil +} + +func ValidateAndCleanupSemVerConstraint(semVerConstraint string, cl CodeLocation) (string, error) { + if len(semVerConstraint) == 0 { + return "", GinkgoErrors.InvalidEmptySemVerConstraint(cl) + } + _, err := semver.NewConstraint(semVerConstraint) + if err != nil { + return "", GinkgoErrors.InvalidSemVerConstraint(semVerConstraint, err.Error(), cl) + } + + return semVerConstraint, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go index ddcbec1ba8..9981a0dd68 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "slices" "sort" "strings" "time" @@ -19,6 +20,57 @@ func init() { } } +// ConstructionNodeReport captures information about the container nodes that enclose the current position in the spec tree during its construction. +type ConstructionNodeReport struct { + // ContainerHierarchyTexts is a slice containing the text strings of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyTexts []string + + // ContainerHierarchyLocations is a slice containing the CodeLocations of + // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyLocations []CodeLocation + + // ContainerHierarchyLabels is a slice containing the labels of + all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchyLabels [][]string + + // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of + all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchySemVerConstraints [][]string + + // IsSerial captures whether any container has the Serial decorator + IsSerial bool + + // IsInOrderedContainer captures whether any container is an Ordered container + IsInOrderedContainer bool +} + +// FullText returns a concatenation of all the report.ContainerHierarchyTexts +func (report ConstructionNodeReport) FullText() string { + texts := []string{} + texts = append(texts, report.ContainerHierarchyTexts...) + texts = slices.DeleteFunc(texts, func(t string) bool { + return t == "" + }) + return strings.Join(texts, " ") +} + +// Labels returns a deduped set of all the spec's Labels. +func (report ConstructionNodeReport) Labels() []string { + out := []string{} + seen := map[string]bool{} + for _, labels := range report.ContainerHierarchyLabels { + for _, label := range labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + + return out +} + // Report captures information about a Ginkgo test run type Report struct { //SuitePath captures the absolute path to the test suite @@ -30,6 +82,9 @@ type Report struct { //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function SuiteLabels []string + //SuiteSemVerConstraints captures any semVerConstraints attached to the suite by the DSL's RunSpecs() function + SuiteSemVerConstraints []string + //SuiteSucceeded captures the success or failure status of the test run //If true, the test run is considered successful. //If false, the test run is considered unsuccessful @@ -129,13 +184,21 @@ type SpecReport struct { // all Describe/Context/When containers in this spec's hierarchy ContainerHierarchyLabels [][]string - // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text + // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchySemVerConstraints [][]string + + // LeafNodeType, LeafNodeLocation, LeafNodeLabels, LeafNodeSemVerConstraints and LeafNodeText capture the NodeType, CodeLocation, and text // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be // one of the NodeTypesForSuiteLevelNodes node types) - LeafNodeType NodeType - LeafNodeLocation CodeLocation - LeafNodeLabels []string - LeafNodeText string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeSemVerConstraints []string + LeafNodeText string + + // Captures the Spec Priority + SpecPriority int // State captures whether the spec has passed, failed, etc.
State SpecState @@ -198,48 +261,52 @@ type SpecReport struct { func (report SpecReport) MarshalJSON() ([]byte, error) { //All this to avoid emitting an empty Failure struct in the JSON out := struct { - ContainerHierarchyTexts []string - ContainerHierarchyLocations []CodeLocation - ContainerHierarchyLabels [][]string - LeafNodeType NodeType - LeafNodeLocation CodeLocation - LeafNodeLabels []string - LeafNodeText string - State SpecState - StartTime time.Time - EndTime time.Time - RunTime time.Duration - ParallelProcess int - Failure *Failure `json:",omitempty"` - NumAttempts int - MaxFlakeAttempts int - MaxMustPassRepeatedly int - CapturedGinkgoWriterOutput string `json:",omitempty"` - CapturedStdOutErr string `json:",omitempty"` - ReportEntries ReportEntries `json:",omitempty"` - ProgressReports []ProgressReport `json:",omitempty"` - AdditionalFailures []AdditionalFailure `json:",omitempty"` - SpecEvents SpecEvents `json:",omitempty"` + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + ContainerHierarchySemVerConstraints [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeSemVerConstraints []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + MaxFlakeAttempts int + MaxMustPassRepeatedly int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` }{ - ContainerHierarchyTexts: report.ContainerHierarchyTexts, - ContainerHierarchyLocations: report.ContainerHierarchyLocations, - ContainerHierarchyLabels: report.ContainerHierarchyLabels, - LeafNodeType: report.LeafNodeType, - LeafNodeLocation: report.LeafNodeLocation, - LeafNodeLabels: report.LeafNodeLabels, - LeafNodeText: report.LeafNodeText, - State: report.State, - StartTime: report.StartTime, - EndTime: report.EndTime, - RunTime: report.RunTime, - ParallelProcess: report.ParallelProcess, - Failure: nil, - ReportEntries: nil, - NumAttempts: report.NumAttempts, - MaxFlakeAttempts: report.MaxFlakeAttempts, - MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, - CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, - CapturedStdOutErr: report.CapturedStdOutErr, + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + ContainerHierarchySemVerConstraints: report.ContainerHierarchySemVerConstraints, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeSemVerConstraints: report.LeafNodeSemVerConstraints, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + MaxFlakeAttempts: report.MaxFlakeAttempts, + MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, } if !report.Failure.IsZero() { @@ -287,6 +354,9 @@ 
func (report SpecReport) FullText() string { if report.LeafNodeText != "" { texts = append(texts, report.LeafNodeText) } + texts = slices.DeleteFunc(texts, func(t string) bool { + return t == "" + }) return strings.Join(texts, " ") } @@ -312,6 +382,28 @@ func (report SpecReport) Labels() []string { return out } +// SemVerConstraints returns a deduped set of all the spec's SemVerConstraints. +func (report SpecReport) SemVerConstraints() []string { + out := []string{} + seen := map[string]bool{} + for _, semVerConstraints := range report.ContainerHierarchySemVerConstraints { + for _, semVerConstraint := range semVerConstraints { + if !seen[semVerConstraint] { + seen[semVerConstraint] = true + out = append(out, semVerConstraint) + } + } + } + for _, semVerConstraint := range report.LeafNodeSemVerConstraints { + if !seen[semVerConstraint] { + seen[semVerConstraint] = true + out = append(out, semVerConstraint) + } + } + + return out +} + // MatchesLabelFilter returns true if the spec satisfies the passed in label filter query func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { filter, err := ParseLabelFilter(query) @@ -321,6 +413,15 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { return filter(report.Labels()), nil } +// MatchesSemVerFilter returns true if the spec satisfies the passed in semver filter version +func (report SpecReport) MatchesSemVerFilter(version string) (bool, error) { + filter, err := ParseSemVerFilter(version) + if err != nil { + return false, err + } + return filter(report.SemVerConstraints()), nil +} + // FileName() returns the name of the file containing the spec func (report SpecReport) FileName() string { return report.LeafNodeLocation.FileName diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 158ac2fd89..2a50192871 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.23.4" +const VERSION = "2.27.3" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 890d892228..b7d7309f3f 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,32 @@ +## 1.38.2 + +- roll back to go 1.23.0 [c404969] + +## 1.38.1 + +### Fixes + +Numerous minor fixes and dependency bumps + +## 1.38.0 + +### Features +- gstruct handles extra unexported fields [4ee7ed0] + +### Fixes +- support [] in IgnoringTopFunction function signatures (#851) [36bbf72] + +### Maintenance +- Bump golang.org/x/net from 0.40.0 to 0.41.0 (#846) [529d408] +- Fix typo [acd1f55] +- Bump google.golang.org/protobuf from 1.36.5 to 1.36.6 (#835) [bae65a0] +- Bump nokogiri from 1.18.4 to 1.18.8 in /docs (#842) [8dda91f] +- Bump golang.org/x/net from 0.39.0 to 0.40.0 (#843) [212d812] +- Bump github.com/onsi/ginkgo/v2 from 2.23.3 to 2.23.4 (#839) [59bd7f9] +- Bump nokogiri from 1.18.1 to 1.18.4 in /docs (#834) [328c729] +- Bump uri from 1.0.2 to 1.0.3 in /docs (#826) [9a798a1] +- Bump golang.org/x/net from 0.37.0 to 0.39.0 (#841) [04a72c6] + ## 1.37.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gmeasure/experiment.go b/vendor/github.com/onsi/gomega/gmeasure/experiment.go index 9bf4b3b43f..9d1b74a78b 100644 --- a/vendor/github.com/onsi/gomega/gmeasure/experiment.go +++ b/vendor/github.com/onsi/gomega/gmeasure/experiment.go @@ -469,9 +469,9 @@ func (e *Experiment) Sample(callback
func(idx int), samplingConfig SamplingConfi wg.Wait() }() if numParallel > 1 { + wg.Add(numParallel) for worker := 0; worker < numParallel; worker++ { go func() { - wg.Add(1) for idx := range work { callback(idx) } diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index a491a64be7..fdba34ee9d 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.37.0" +const GOMEGA_VERSION = "1.38.2" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -178,7 +178,7 @@ func ensureDefaultGomegaIsConfigured() { // All subsequent arguments will be required to be nil/zero. // // This is convenient if you want to make an assertion on a method/function that returns -// a value and an error - a common patter in Go. +// a value and an error - a common pattern in Go. // // For example, given a function with signature: // diff --git a/vendor/github.com/onsi/gomega/gstruct/fields.go b/vendor/github.com/onsi/gomega/gstruct/fields.go index bf4c0ae109..2f4685fc48 100644 --- a/vendor/github.com/onsi/gomega/gstruct/fields.go +++ b/vendor/github.com/onsi/gomega/gstruct/fields.go @@ -8,6 +8,7 @@ import ( "reflect" "runtime/debug" "strings" + "unicode" "github.com/onsi/gomega/format" errorsutil "github.com/onsi/gomega/gstruct/errors" @@ -65,6 +66,7 @@ func MatchFields(options Options, fields Fields) types.GomegaMatcher { return &FieldsMatcher{ Fields: fields, IgnoreExtras: options&IgnoreExtras != 0, + IgnoreUnexportedExtras: options&IgnoreUnexportedExtras != 0, IgnoreMissing: options&IgnoreMissing != 0, } } @@ -75,6 +77,8 @@ type FieldsMatcher struct { // Whether to ignore extra elements or consider it an error. IgnoreExtras bool + // Whether to ignore unexported extra elements or consider it an error. + IgnoreUnexportedExtras bool // Whether to ignore missing elements or consider it an error. IgnoreMissing bool @@ -97,6 +101,14 @@ func (m *FieldsMatcher) Match(actual any) (success bool, err error) { return true, nil } +func isExported(fieldName string) bool { + if fieldName == "" { + return false + } + r := []rune(fieldName)[0] + return unicode.IsUpper(r) +} + func (m *FieldsMatcher) matchFields(actual any) (errs []error) { val := reflect.ValueOf(actual) typ := val.Type() @@ -116,13 +128,21 @@ func (m *FieldsMatcher) matchFields(actual any) (errs []error) { matcher, expected := m.Fields[fieldName] if !expected { + if m.IgnoreUnexportedExtras && !isExported(fieldName) { + return nil + } if !m.IgnoreExtras { return fmt.Errorf("unexpected field %s: %+v", fieldName, actual) } return nil } - field := val.Field(i).Interface() + var field any + if _, isIgnoreMatcher := matcher.(*IgnoreMatcher); isIgnoreMatcher { + field = struct{}{} // the matcher does not care about the actual value + } else { + field = val.Field(i).Interface() + } match, err := matcher.Match(field) if err != nil { diff --git a/vendor/github.com/onsi/gomega/gstruct/types.go b/vendor/github.com/onsi/gomega/gstruct/types.go index 54222221ce..a5f6c390bd 100644 --- a/vendor/github.com/onsi/gomega/gstruct/types.go +++ b/vendor/github.com/onsi/gomega/gstruct/types.go @@ -12,4 +12,8 @@ const ( //considered by the identifier function.
All members that map to a given key must still match successfully //with the matcher that is provided for that key. AllowDuplicates + //IgnoreUnexportedExtras tells the matcher to ignore extra unexported fields, rather than triggering a failure. + //It is not possible to check the value of unexported fields, so this option is only useful when you want to + //check every exported field but don't care about extra unexported fields. + IgnoreUnexportedExtras ) diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index a3a646e4ad..4121505b62 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -452,7 +452,7 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch } } else { var fgErr formattedGomegaError - if errors.As(actualErr, &fgErr) { + if errors.As(matcherErr, &fgErr) { message += fgErr.FormattedGomegaError() + "\n" } else { message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr) diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 532fc37449..ce74eee4c7 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -2,6 +2,7 @@ package matchers import ( "bytes" + "errors" "fmt" "github.com/google/go-cmp/cmp" @@ -32,7 +33,7 @@ func (matcher *BeComparableToMatcher) Match(actual any) (success bool, matchErr if err, ok := r.(error); ok { matchErr = err } else if errMsg, ok := r.(string); ok { - matchErr = fmt.Errorf(errMsg) + matchErr = errors.New(errMsg) } } }() diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go index 95057c26cc..c3da9bd48b 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/onsi/gomega/format" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" ) type MatchYAMLMatcher struct { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index ad347113c0..2331b8b4f3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const help: help, variableLabels: variableLabels.compile(), } - if !model.IsValidMetricName(model.LabelValue(fqName)) { + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+ if !model.NameValidationScheme.IsValidMetricName(fqName) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index 8b016355ad..7bac0da33d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -453,7 +453,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { } group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') { groups = append(groups, group) } return groups @@ -568,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) + _, err := fmt.Fprintf(buf, format, args...) return err } ws := func(s string) error { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index f7f97ef926..d273b6640e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -67,7 +67,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) } // Our current conversion moves to legacy naming, so use legacy validation. - valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) + valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index c21911f292..5fe8d3b4d2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error { } func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. + return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 592eec3e24..76e59f1288 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -186,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error { case pb.Counter != nil: pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] case pb.Histogram != nil: + h := pb.Histogram for _, e := range m.exemplars { - // pb.Histogram.Bucket are sorted by UpperBound. 
- i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { - return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 || + len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) && + e.GetTimestamp() != nil { + h.Exemplars = append(h.Exemplars, e) + if len(h.Bucket) == 0 { + // Don't proceed to classic buckets if there are none. + continue + } + } + // h.Bucket are sorted by UpperBound. + i := sort.Search(len(h.Bucket), func(i int) bool { + return h.Bucket[i].GetUpperBound() >= e.GetValue() }) - if i < len(pb.Histogram.Bucket) { - pb.Histogram.Bucket[i].Exemplar = e + if i < len(h.Bucket) { + h.Bucket[i].Exemplar = e } else { // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. b := &dto.Bucket{ - CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), + CumulativeCount: proto.Uint64(h.GetSampleCount()), UpperBound: proto.Float64(math.Inf(1)), Exemplar: e, } - pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + h.Bucket = append(h.Bucket, b) } } default: @@ -227,6 +237,7 @@ type Exemplar struct { // Only last applicable exemplar is injected from the list. // For example for Counter it means last exemplar is injected. // For Histogram, it means last applicable exemplar for each bucket is injected. +// For a Native Histogram, all valid exemplars are injected. // // NewMetricWithExemplars works best with MustNewConstMetric and // MustNewConstHistogram, see example. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go index 0a61b98461..b32c95fa3f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -25,9 +25,9 @@ import ( "golang.org/x/sys/unix" ) -// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// errNotImplemented is returned by stub functions that replace cgo functions, when cgo // isn't available. -var notImplementedErr = errors.New("not implemented") +var errNotImplemented = errors.New("not implemented") type memoryInfo struct { vsize uint64 // Virtual memory size in bytes @@ -101,7 +101,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if memInfo, err := getMemory(); err == nil { ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) - } else if !errors.Is(err, notImplementedErr) { + } else if !errors.Is(err, errNotImplemented) { // Don't report an error when support is not compiled in. 
c.reportError(ch, c.rss, err) c.reportError(ch, c.vsize, err) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go index 8ddb0995d6..378865129b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go @@ -16,7 +16,7 @@ package prometheus func getMemory() (*memoryInfo, error) { - return nil, notImplementedErr + return nil, errNotImplemented } // describe returns all descriptions of the collector for Darwin. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go index 9f4b130bef..8074f70f5d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go @@ -66,11 +66,11 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if netstat, err := p.Netstat(); err == nil { var inOctets, outOctets float64 - if netstat.IpExt.InOctets != nil { - inOctets = *netstat.IpExt.InOctets + if netstat.InOctets != nil { + inOctets = *netstat.InOctets } - if netstat.IpExt.OutOctets != nil { - outOctets = *netstat.IpExt.OutOctets + if netstat.OutOctets != nil { + outOctets = *netstat.OutOctets } ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index 356edb7868..9332b0249a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -392,7 +392,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool { func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { labels := prometheus.Labels{} - if !(code || method) { + if !code && !method { return labels } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 1258508e4f..80a4d7c355 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -262,7 +262,7 @@ func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNa // convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of // dto.MetricFamily. 
func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { - var tp expfmt.TextParser + tp := expfmt.NewTextParser(model.UTF8Validation) notNormalized, err := tp.TextToMetricFamilies(reader) if err != nil { return nil, fmt.Errorf("converting reader to metric families failed: %w", err) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 2c808eece0..487b466563 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { return false } - return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) + return m.deleteByHashWithLabelValues(h, lvs, m.curry) } // Delete deletes the metric where the variable labels are the same as those @@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool { return false } - return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) + return m.deleteByHashWithLabels(h, labels, m.curry) } // DeletePartialMatch deletes all metrics where the variable labels contain all of those @@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int { labels, closer := constrainLabels(m.desc, labels) defer closer() - return m.metricMap.deleteByLabels(labels, m.curry) + return m.deleteByLabels(labels, m.curry) } // Without explicit forwarding of Describe, Collect, Reset, those methods won't @@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil + return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil } // GetMetricWith returns the Metric for the given Labels map (the label names @@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil + return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil } func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 25da157f15..2ed1285068 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer { // metric names that are standardized across applications, as that would break // horizontal monitoring, for example the metrics provided by the Go collector // (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with “go_” or “process_”, +// fact, those metrics are already prefixed with "go_" or "process_", // respectively.) // // Conflicts between Collectors registered through the original Registerer with @@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { } } +// WrapCollectorWith returns a Collector wrapping the provided Collector. The +// wrapped Collector will add the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. 
+// +// WrapCollectorWith can be useful to work with multiple instances of a third +// party library that does not expose enough flexibility on the lifecycle of its +// registered metrics. +// For example, let's say you have a foo.New(reg Registerer) constructor that +// registers metrics but never unregisters them, and you want to create multiple +// instances of foo.Foo with different labels. +// The way to achieve that is to create a new Registry, pass it to foo.New, +// then use WrapCollectorWith to wrap that Registry with the desired labels and +// register that as a collector in your main Registry. +// Then you can un-register the wrapped collector, effectively un-registering the +// metrics registered by foo.New. +func WrapCollectorWith(labels Labels, c Collector) Collector { + return &wrappingCollector{ + wrappedCollector: c, + labels: labels, + } +} + +// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The +// wrapped Collector will add the provided prefix to the name of all Metrics it collects. +// +// See the documentation of WrapCollectorWith for more details on the use case. +func WrapCollectorWithPrefix(prefix string, c Collector) Collector { + return &wrappingCollector{ + wrappedCollector: c, + prefix: prefix, + } +} + type wrappingRegisterer struct { wrappedRegisterer Registerer prefix string diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 1448439b7f..7b762370e2 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -70,19 +70,34 @@ func ResponseFormat(h http.Header) Format { return FmtUnknown } -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. +// NewDecoder returns a new decoder based on the given input format. Metric +// names are validated based on the provided Format -- if the format requires +// escaping, traditional Prometheus validity checking is used. Otherwise, names +// are checked for UTF-8 validity. Supported formats include delimited protobuf +// and Prometheus text format. For historical reasons, this decoder falls back +// to classic text decoding for any other format. This decoder does not fully +// support OpenMetrics although it may often succeed due to the similarities +// between the formats. This decoder may not support the latest features of +// Prometheus text format and is not intended for high-performance applications. +// See: https://github.com/prometheus/common/issues/812 func NewDecoder(r io.Reader, format Format) Decoder { + scheme := model.LegacyValidation + if format.ToEscapingScheme() == model.NoEscaping { + scheme = model.UTF8Validation + } switch format.FormatType() { case TypeProtoDelim: - return &protoDecoder{r: bufio.NewReader(r)} + return &protoDecoder{r: bufio.NewReader(r), s: scheme} + case TypeProtoText, TypeProtoCompact: + return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)} } - return &textDecoder{r: r} + return &textDecoder{r: r, s: scheme} } // protoDecoder implements the Decoder interface for protocol buffers. type protoDecoder struct { r protodelim.Reader + s model.ValidationScheme } // Decode implements the Decoder interface.
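The WrapCollectorWith documentation above describes a per-instance labeling pattern; the sketch below makes it concrete. Everything except `prometheus.WrapCollectorWith` (added in the hunk above) is illustrative: the `foo_operations_total` counter stands in for metrics created by a hypothetical third-party library that offers no labeling hooks.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	mainReg := prometheus.NewRegistry()

	for _, instance := range []string{"a", "b"} {
		// Stand-in for a collector created by a library we cannot relabel.
		c := prometheus.NewCounter(prometheus.CounterOpts{
			Name: "foo_operations_total",
			Help: "Operations performed by foo.",
		})
		c.Inc()

		// Re-expose the collector with an identifying const label so two
		// otherwise identical instances can coexist in one registry.
		mainReg.MustRegister(prometheus.WrapCollectorWith(
			prometheus.Labels{"foo_instance": instance}, c))
	}

	mfs, err := mainReg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		// One family, two series distinguished by foo_instance="a"/"b".
		fmt.Println(mf.GetName(), len(mf.GetMetric()))
	}
}
```

This works because both wrapped collectors describe the same metric name with the same help text and label dimensions, differing only in the const label value, which the registry accepts.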
@@ -93,7 +108,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + if !d.s.IsValidMetricName(v.GetName()) { return fmt.Errorf("invalid metric name %q", v.GetName()) } for _, m := range v.GetMetric() { @@ -107,7 +122,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if !model.LabelValue(l.GetValue()).IsValid() { return fmt.Errorf("invalid label value %q", l.GetValue()) } - if !model.LabelName(l.GetName()).IsValid() { + if !d.s.IsValidLabelName(l.GetName()) { return fmt.Errorf("invalid label name %q", l.GetName()) } } @@ -115,10 +130,20 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { return nil } +// errDecoder is an error-state decoder that always returns the same error. +type errDecoder struct { + err error +} + +func (d *errDecoder) Decode(*dto.MetricFamily) error { + return d.err +} + // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader fams map[string]*dto.MetricFamily + s model.ValidationScheme err error } @@ -126,7 +151,7 @@ type textDecoder struct { func (d *textDecoder) Decode(v *dto.MetricFamily) error { if d.err == nil { // Read all metrics in one shot. - var p TextParser + p := NewTextParser(d.s) d.fams, d.err = p.TextToMetricFamilies(d.r) // If we don't get an error, store io.EOF for the end. if d.err == nil { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index d7f3d76f55..73c24dfbc9 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,14 +18,12 @@ import ( "io" "net/http" + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" "github.com/prometheus/common/model" - - "github.com/munnerz/goautoneg" - - dto "github.com/prometheus/client_model/go" ) // Encoder types encode metric families into an underlying wire protocol. @@ -61,7 +59,7 @@ func (ec encoderCloser) Close() error { // appropriate accepted type is found, FmtText is returned (which is the // Prometheus text format). This function will never negotiate FmtOpenMetrics, // as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. +// FmtOpenMetrics, use NegotiateIncludingOpenMetrics. 
func Negotiate(h http.Header) Format { escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { @@ -153,7 +151,7 @@ func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := protodelim.MarshalTo(w, v) + _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index b26886560d..c34c7de432 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -36,9 +36,11 @@ const ( ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + //nolint:revive // Allow for underscores. OpenMetricsVersion_0_0_1 = "0.0.1" + //nolint:revive // Allow for underscores. OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. Do not do direct @@ -54,8 +56,10 @@ const ( // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) @@ -188,8 +192,8 @@ func (f Format) FormatType() FormatType { // Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid // "escaping" term exists, that will be used. Otherwise, the global default will // be returned. -func (format Format) ToEscapingScheme() model.EscapingScheme { - for _, p := range strings.Split(string(format), ";") { +func (f Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(f), ";") { toks := strings.Split(p, "=") if len(toks) != 2 { continue diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index dfac962a4e..0290f6abc4 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -17,7 +17,11 @@ package expfmt -import "bytes" +import ( + "bytes" + + "github.com/prometheus/common/model" +) // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // @@ -26,9 +30,8 @@ import "bytes" // // Further input samples should go in the folder fuzz/corpus. 
func Fuzz(in []byte) int { - parser := TextParser{} + parser := NewTextParser(model.UTF8Validation) _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - if err != nil { return 0 } diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index a21ed4ec1f..8dbf6d04ed 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,10 @@ import ( "strconv" "strings" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/types/known/timestamppb" "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" ) type encoderOption struct { @@ -249,7 +248,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E // Finally the samples, one line for each. if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { - compliantName = compliantName + "_total" + compliantName += "_total" } for _, metric := range in.Metric { switch metricType { @@ -477,7 +476,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -641,11 +640,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - err = (*e).Timestamp.CheckValid() + err = e.Timestamp.CheckValid() if err != nil { return written, err } - ts := (*e).Timestamp.AsTime() + ts := e.Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 4b86434b33..c4e9c1bbc3 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -22,9 +22,9 @@ import ( "strings" "sync" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" ) // enhancedWriter has all the enhanced write functions needed here. bufio.Writer @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(name) { + if model.LegacyValidation.IsValidMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index b4607fe4d2..8f2edde324 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -78,6 +78,14 @@ type TextParser struct { // These indicate if the metric name from the current line being parsed is inside // braces and if that metric name was found respectively. currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool + // scheme sets the desired ValidationScheme for names. Defaults to the invalid + // UnsetValidation. + scheme model.ValidationScheme +} + +// NewTextParser returns a new TextParser with the provided nameValidationScheme. +func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser { + return TextParser{scheme: nameValidationScheme} } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -126,6 +134,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF func (p *TextParser) reset(in io.Reader) { p.metricFamiliesByName = map[string]*dto.MetricFamily{} + p.currentLabelPairs = nil if p.buf == nil { p.buf = bufio.NewReader(in) } else { @@ -216,6 +225,9 @@ func (p *TextParser) startComment() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -244,6 +256,9 @@ func (p *TextParser) readingMetricName() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } // Now is the time to fix the type if it hasn't happened yet. if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() @@ -311,6 +326,9 @@ func (p *TextParser) startLabelName() stateFn { switch p.currentByte { case ',': p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -319,6 +337,10 @@ func (p *TextParser) startLabelName() stateFn { return p.startLabelName case '}': p.setOrCreateCurrentMF() + if p.err != nil { + p.currentLabelPairs = nil + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -341,25 +363,30 @@ func (p *TextParser) startLabelName() stateFn { p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + p.currentLabelPairs = nil + return nil + } + if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) { + p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName())) + p.currentLabelPairs = nil return nil } // Special summary/histogram treatment. Don't add 'quantile' and 'le' // labels to 'real' labels. 
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && + (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) for _, l := range p.currentLabelPairs { lName := l.GetName() - if _, exists := labels[lName]; !exists { - labels[lName] = struct{}{} - } else { + if _, exists := labels[lName]; exists { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) p.currentLabelPairs = nil return nil } + labels[lName] = struct{}{} } return p.startLabelValue } @@ -440,7 +467,8 @@ func (p *TextParser) readingValue() stateFn { // When we are here, we have read all the labels, so for the // special case of a summary/histogram, we can finally find out // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { + switch p.currentMF.GetType() { + case dto.MetricType_SUMMARY: signature := model.LabelsToSignature(p.currentLabels) if summary := p.summaries[signature]; summary != nil { p.currentMetric = summary @@ -448,7 +476,7 @@ func (p *TextParser) readingValue() stateFn { p.summaries[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + case dto.MetricType_HISTOGRAM: signature := model.LabelsToSignature(p.currentLabels) if histogram := p.histograms[signature]; histogram != nil { p.currentMetric = histogram @@ -456,7 +484,7 @@ func (p *TextParser) readingValue() stateFn { p.histograms[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else { + default: p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } if p.readTokenUntilWhitespace(); p.err != nil { @@ -805,6 +833,10 @@ func (p *TextParser) setOrCreateCurrentMF() { p.currentIsHistogramCount = false p.currentIsHistogramSum = false name := p.currentToken.String() + if !p.scheme.IsValidMetricName(name) { + p.parseError(fmt.Sprintf("invalid metric name %q", name)) + return + } if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { return } diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index bd3a39e3e1..460f554f29 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool { return a.ResolvedAt(time.Now()) } -// ResolvedAt returns true off the activity interval ended before +// ResolvedAt returns true iff the activity interval ended before // the given timestamp. func (a *Alert) ResolvedAt(ts time.Time) bool { if a.EndsAt.IsZero() { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 73b7aa3e60..dfeb34be5f 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -22,7 +22,7 @@ import ( ) const ( - // AlertNameLabel is the name of the label containing the an alert's name. 
+ // AlertNameLabel is the name of the label containing the alert's name. AlertNameLabel = "alertname" // ExportedLabelPrefix is the prefix to prepend to the label names present in @@ -32,6 +32,12 @@ const ( // MetricNameLabel is the label name indicating the metric name of a // timeseries. MetricNameLabel = "__name__" + // MetricTypeLabel is the label name indicating the metric type of + // timeseries as per the PROM-39 proposal. + MetricTypeLabel = "__type__" + // MetricUnitLabel is the label name indicating the metric unit of + // timeseries as per the PROM-39 proposal. + MetricUnitLabel = "__unit__" // SchemeLabel is the name of the label that holds the scheme on which to // scrape a target. @@ -100,33 +106,21 @@ type LabelName string // IsValid returns true iff the name matches the pattern of LabelNameRE when // NameValidationScheme is set to LegacyValidation, or valid UTF-8 if // NameValidationScheme is set to UTF8Validation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [ValidationScheme.IsValidLabelName] instead. func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - switch NameValidationScheme { - case LegacyValidation: - return ln.IsValidLegacy() - case UTF8Validation: - return utf8.ValidString(string(ln)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidLabelName(string(ln)) } // IsValidLegacy returns true iff name matches the pattern of LabelNameRE for // legacy names. It does not use LabelNameRE for the check but a much faster // hardcoded implementation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [LegacyValidation.IsValidLabelName] instead. func (ln LabelName) IsValidLegacy() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true + return LegacyValidation.IsValidLabelName(string(ln)) } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index d0ad88da33..9de47b2568 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -114,10 +114,10 @@ func (ls LabelSet) Clone() LabelSet { } // Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) +func (ls LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(ls)) - for k, v := range l { + for k, v := range ls { result[k] = v } @@ -140,7 +140,7 @@ func (ls LabelSet) FastFingerprint() Fingerprint { } // UnmarshalJSON implements the json.Unmarshaler interface. 
-func (l *LabelSet) UnmarshalJSON(b []byte) error { +func (ls *LabelSet) UnmarshalJSON(b []byte) error { var m map[LabelName]LabelValue if err := json.Unmarshal(b, &m); err != nil { return err @@ -153,6 +153,6 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error { return fmt.Errorf("%q is not a valid label name", ln) } } - *l = LabelSet(m) + *ls = LabelSet(m) return nil } diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 5766107cf9..3feebf328a 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -14,6 +14,7 @@ package model import ( + "encoding/json" "errors" "fmt" "regexp" @@ -23,17 +24,30 @@ import ( "unicode/utf8" dto "github.com/prometheus/client_model/go" + "go.yaml.in/yaml/v2" "google.golang.org/protobuf/proto" ) var ( - // NameValidationScheme determines the method of name validation to be used by - // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 - // mode in isolation from other components that don't support UTF-8 may result - // in bugs or other undefined behavior. This value can be set to - // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To - // avoid need for locking, this value should be set once, ideally in an - // init(), before multiple goroutines are started. + // NameValidationScheme determines the global default method of the name + // validation to be used by all calls to IsValidMetricName() and LabelName + // IsValid(). + // + // Deprecated: This variable should not be used and might be removed in the + // far future. If you wish to stick to the legacy name validation, use the + // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods + // instead. This variable is here as an escape hatch for emergency cases, + // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., + // to delay UTF-8 migrations in time or aid in debugging unforeseen results of + // the change. In such a case, a temporary assignment to `LegacyValidation` + // value in the `init()` function in your main.go or so could be considered. + // + // Historically we opted for a global variable for feature gating different + // validation schemes in operations that were not otherwise easily adjustable + // (e.g. Labels yaml unmarshaling). That could have been a mistake; a separate + // Labels structure or package might have been a better choice. Given the + // change was made and many upgraded the common already, we leave this as-is + // with this warning and learning for the future. NameValidationScheme = UTF8Validation // NameEscapingScheme defines the default way that names will be escaped when @@ -50,16 +64,151 @@ var ( type ValidationScheme int const ( - // LegacyValidation is a setting that requirets that metric and label names + // UnsetValidation represents an undefined ValidationScheme. + // Should not be used in practice. + UnsetValidation ValidationScheme = iota + + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. - LegacyValidation ValidationScheme = iota + LegacyValidation // UTF8Validation only requires that metric and label names be valid UTF-8 // strings.
UTF8Validation ) +var _ interface { + yaml.Marshaler + yaml.Unmarshaler + json.Marshaler + json.Unmarshaler + fmt.Stringer +} = new(ValidationScheme) + +// String returns the string representation of s. +func (s ValidationScheme) String() string { + switch s { + case UnsetValidation: + return "unset" + case LegacyValidation: + return "legacy" + case UTF8Validation: + return "utf8" + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (s ValidationScheme) MarshalYAML() (any, error) { + switch s { + case UnsetValidation: + return "", nil + case LegacyValidation, UTF8Validation: + return s.String(), nil + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error { + var scheme string + if err := unmarshal(&scheme); err != nil { + return err + } + return s.Set(scheme) +} + +// MarshalJSON implements the json.Marshaler interface. +func (s ValidationScheme) MarshalJSON() ([]byte, error) { + switch s { + case UnsetValidation: + return json.Marshal("") + case UTF8Validation, LegacyValidation: + return json.Marshal(s.String()) + default: + return nil, fmt.Errorf("unhandled ValidationScheme: %d", s) + } +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error { + var repr string + if err := json.Unmarshal(bytes, &repr); err != nil { + return err + } + return s.Set(repr) +} + +// Set implements the pflag.Value interface. +func (s *ValidationScheme) Set(text string) error { + switch text { + case "": + // Don't change the value. + case LegacyValidation.String(): + *s = LegacyValidation + case UTF8Validation.String(): + *s = UTF8Validation + default: + return fmt.Errorf("unrecognized ValidationScheme: %q", text) + } + return nil +} + +// IsValidMetricName returns whether metricName is valid according to s. +func (s ValidationScheme) IsValidMetricName(metricName string) bool { + switch s { + case LegacyValidation: + if len(metricName) == 0 { + return false + } + for i, b := range metricName { + if !isValidLegacyRune(b, i) { + return false + } + } + return true + case UTF8Validation: + if len(metricName) == 0 { + return false + } + return utf8.ValidString(metricName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String())) + } +} + +// IsValidLabelName returns whether labelName is valid according to s. +func (s ValidationScheme) IsValidLabelName(labelName string) bool { + switch s { + case LegacyValidation: + if len(labelName) == 0 { + return false + } + for i, b := range labelName { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck + return false + } + } + return true + case UTF8Validation: + if len(labelName) == 0 { + return false + } + return utf8.ValidString(labelName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s)) + } +} + +// Type implements the pflag.Value interface. +func (ValidationScheme) Type() string { + return "validationScheme" +} + type EscapingScheme int const ( @@ -89,7 +238,7 @@ const ( // Accept header, the default NameEscapingScheme will be used. EscapingKey = "escaping" - // Possible values for Escaping Key: + // Possible values for Escaping Key. 
AllowUTF8 = "allow-utf-8" // No escaping required. EscapeUnderscores = "underscores" EscapeDots = "dots" @@ -163,34 +312,22 @@ func (m Metric) FastFingerprint() Fingerprint { // IsValidMetricName returns true iff name matches the pattern of MetricNameRE // for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is // selected. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [ValidationScheme.IsValidMetricName] instead. func IsValidMetricName(n LabelValue) bool { - switch NameValidationScheme { - case LegacyValidation: - return IsValidLegacyMetricName(string(n)) - case UTF8Validation: - if len(n) == 0 { - return false - } - return utf8.ValidString(string(n)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidMetricName(string(n)) } // IsValidLegacyMetricName is similar to IsValidMetricName but always uses the // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [LegacyValidation.IsValidMetricName] instead. func IsValidLegacyMetricName(n string) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !isValidLegacyRune(b, i) { - return false - } - } - return true + return LegacyValidation.IsValidMetricName(n) } // EscapeMetricFamily escapes the given metric names and labels with the given @@ -298,13 +435,14 @@ func EscapeName(name string, scheme EscapingScheme) string { case DotsEscaping: // Do not early return for legacy valid names, we still escape underscores. for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if b == '.' { + case b == '.': escaped.WriteString("_dot_") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else { + default: escaped.WriteString("__") } } @@ -315,13 +453,14 @@ func EscapeName(name string, scheme EscapingScheme) string { } escaped.WriteString("U__") for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else if !utf8.ValidRune(b) { + case !utf8.ValidRune(b): escaped.WriteString("_FFFD_") - } else { + default: escaped.WriteRune('_') escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') @@ -333,7 +472,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } } -// lower function taken from strconv.atoi +// lower function taken from strconv.atoi. 
func lower(c byte) byte { return c | ('x' - 'X') } @@ -397,11 +536,12 @@ func UnescapeName(name string, scheme EscapingScheme) string { } r := lower(escapedName[i]) utf8Val *= 16 - if r >= '0' && r <= '9' { + switch { + case r >= '0' && r <= '9': utf8Val += uint(r) - '0' - } else if r >= 'a' && r <= 'f' { + case r >= 'a' && r <= 'f': utf8Val += uint(r) - 'a' + 10 - } else { + default: return name } i++ diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 5727452c1e..1730b0fdc1 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -126,14 +126,14 @@ func (t *Time) UnmarshalJSON(b []byte) error { p := strings.Split(string(b), ".") switch len(p) { case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } *t = Time(v * second) case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } @@ -143,7 +143,7 @@ func (t *Time) UnmarshalJSON(b []byte) error { if prec < 0 { p[1] = p[1][:dotPrecision] } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) + p[1] += strings.Repeat("0", prec) } va, err := strconv.ParseInt(p[1], 10, 32) @@ -170,15 +170,15 @@ func (t *Time) UnmarshalJSON(b []byte) error { // This type should not propagate beyond the scope of input/output processing. type Duration time.Duration -// Set implements pflag/flag.Value +// Set implements pflag/flag.Value. func (d *Duration) Set(s string) error { var err error *d, err = ParseDuration(s) return err } -// Type implements pflag.Value -func (d *Duration) Type() string { +// Type implements pflag.Value. +func (*Duration) Type() string { return "duration" } @@ -201,6 +201,7 @@ var unitMap = map[string]struct { // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. +// Negative durations are not supported. func ParseDuration(s string) (Duration, error) { switch s { case "0": @@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) { return 0, errors.New("duration out of range") } } + return Duration(dur), nil } +// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations. +func ParseDurationAllowNegative(s string) (Duration, error) { + if s == "" || s[0] != '-' { + return ParseDuration(s) + } + + d, err := ParseDuration(s[1:]) + + return -d, err +} + func (d Duration) String() string { var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" + ms = int64(time.Duration(d) / time.Millisecond) + r = "" + sign = "" ) + if ms == 0 { return "0s" } + if ms < 0 { + sign, ms = "-", -ms + } + f := func(unit string, mult int64, exact bool) { if exact && ms%mult != 0 { return @@ -286,7 +305,7 @@ func (d Duration) String() string { f("s", 1000, false) f("ms", 1, false) - return r + return sign + r } // MarshalJSON implements the json.Marshaler interface. 
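A quick illustration of the negative-duration handling introduced above (an illustrative sketch, not part of the diff): ParseDuration keeps rejecting a leading minus sign, while the new ParseDurationAllowNegative strips it, parses the remainder, and negates the result; the sign logic added to Duration.String then round-trips the value.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// ParseDuration is unchanged: negative durations are not supported.
	if _, err := model.ParseDuration("-1h30m"); err != nil {
		fmt.Println("ParseDuration:", err)
	}

	// ParseDurationAllowNegative accepts the same syntax with a leading '-'.
	d, err := model.ParseDurationAllowNegative("-1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // "-1h30m", via the sign handling added to Duration.String
}
```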
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 8050637d82..a9995a37ee 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -191,7 +191,8 @@ func (ss SampleStream) String() string { } func (ss SampleStream) MarshalJSON() ([]byte, error) { - if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + switch { + case len(ss.Histograms) > 0 && len(ss.Values) > 0: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -202,7 +203,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else if len(ss.Histograms) > 0 { + case len(ss.Histograms) > 0: v := struct { Metric Metric `json:"metric"` Histograms []SampleHistogramPair `json:"histograms"` @@ -211,7 +212,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else { + default: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -258,7 +259,7 @@ func (s Scalar) String() string { // MarshalJSON implements json.Marshaler. func (s Scalar) MarshalJSON() ([]byte, error) { v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) + return json.Marshal([...]interface{}{s.Timestamp, v}) } // UnmarshalJSON implements json.Unmarshaler. @@ -349,9 +350,9 @@ func (m Matrix) Len() int { return len(m) } func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) +func (m Matrix) String() string { + matCp := make(Matrix, len(m)) + copy(matCp, m) sort.Sort(matCp) strs := make([]string, len(matCp)) diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go index 895e6a3e83..91ce5b7a45 100644 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -86,22 +86,22 @@ func (s *HistogramBucket) Equal(o *HistogramBucket) bool { return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) } -func (b HistogramBucket) String() string { +func (s HistogramBucket) String() string { var sb strings.Builder - lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 - upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3 + upperInclusive := s.Boundaries == 0 || s.Boundaries == 3 if lowerInclusive { sb.WriteRune('[') } else { sb.WriteRune('(') } - fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper) if upperInclusive { sb.WriteRune(']') } else { sb.WriteRune(')') } - fmt.Fprintf(&sb, ":%v", b.Count) + fmt.Fprintf(&sb, ":%v", s.Count) return sb.String() } diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go index 726c50ee63..078910f46b 100644 --- a/vendor/github.com/prometheus/common/model/value_type.go +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -66,8 +66,8 @@ func (et *ValueType) UnmarshalJSON(b []byte) error { return nil } -func (e ValueType) String() string { - switch e { +func (et ValueType) String() string { + switch et { case ValNone: 
return "" case ValScalar: diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 126df9e67a..3c3bf910fd 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,22 +1,45 @@ ---- +version: "2" linters: enable: - - errcheck - - godot - - gosimple - - govet - - ineffassign - - misspell - - revive - - staticcheck - - testifylint - - unused - -linter-settings: - godot: - capital: true - exclude: - # Ignore "See: URL" - - 'See:' - misspell: - locale: US + - forbidigo + - godot + - misspell + - revive + - testifylint + settings: + forbidigo: + forbid: + - pattern: ^fmt\.Print.*$ + msg: Do not commit print statements. + godot: + exclude: + # Ignore "See: URL". + - 'See:' + capital: true + misspell: + locale: US + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + settings: + goimports: + local-prefixes: + - github.com/prometheus/procfs + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 1617292350..0ed55c2ba2 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))Error Parsing File PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') PROMU := $(FIRST_GOPATH)/bin/promu @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.59.0 +GOLANGCI_LINT_VERSION ?= v2.0.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -275,3 +275,9 @@ $(1)_precheck: exit 1; \ fi endef + +govulncheck: install-govulncheck + govulncheck ./... + +install-govulncheck: + command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 1224816c2a..0718239cf1 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`. The procfs library includes a set of test fixtures which include many example files from the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make fixtures/.unpacked` or just `make test`. +ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`. 
```bash rm -rf testdata/fixtures make test ``` -Next, make the required changes to the extracted files in the `fixtures` directory. When +Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using `git diff testdata/fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index cdcc8a7ccc..2e53344151 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -23,9 +23,9 @@ import ( // Learned from include/uapi/linux/if_arp.h. const ( - // completed entry (ha valid). + // Completed entry (ha valid). ATFComplete = 0x02 - // permanent entry. + // Permanent entry. ATFPermanent = 0x04 // Publish entry. ATFPublish = 0x08 diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 4980c875bf..9bdaccc7c8 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -24,8 +24,14 @@ type FS struct { isReal bool } -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = fs.DefaultProcMountPoint +const ( + // DefaultMountPoint is the common mount point of the proc filesystem. + DefaultMountPoint = fs.DefaultProcMountPoint + + // SectorSize represents the size of a sector in bytes. + // It is specific to Linux block I/O operations. + SectorSize = 512 +) // NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. // It will error if the mount point directory can't be read or is a file. diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 134767d69a..1b5bdbdf84 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -17,7 +17,7 @@ package procfs // isRealProc returns true on architectures that don't have a Type argument -// in their Statfs_t struct -func isRealProc(mountPoint string) (bool, error) { +// in their Statfs_t struct. 
+func isRealProc(_ string) (bool, error) { return true, nil } diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index cf2e3eaa03..7db8633077 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -162,7 +162,7 @@ type Fscacheinfo struct { ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 // Number of release reqs ignored due to in-progress store ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores cancelled due to release req + // Number of page stores canceled due to release req PageStoresCancelledByReleaseRequests uint64 VmscanWaiting uint64 // Number of times async ops added to pending queues @@ -171,11 +171,11 @@ type Fscacheinfo struct { OpsRunning uint64 // Number of times async ops queued for processing OpsEnqueued uint64 - // Number of async ops cancelled + // Number of async ops canceled OpsCancelled uint64 // Number of async ops rejected due to object lookup/create failure OpsRejected uint64 - // Number of async ops initialised + // Number of async ops initialized OpsInitialised uint64 // Number of async ops queued for deferred release OpsDeferred uint64 diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 3c18c7610e..3a43e83915 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -28,6 +28,9 @@ const ( // DefaultConfigfsMountPoint is the common mount point of the configfs. DefaultConfigfsMountPoint = "/sys/kernel/config" + + // DefaultSelinuxMountPoint is the common mount point of the selinuxfs. + DefaultSelinuxMountPoint = "/sys/fs/selinux" ) // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 14272dc788..5a7d2df06a 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -14,6 +14,7 @@ package util import ( + "errors" "os" "strconv" "strings" @@ -110,3 +111,16 @@ func ParseBool(b string) *bool { } return &truth } + +// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX. +func ReadHexFromFile(path string) (uint64, error) { + data, err := os.ReadFile(path) + if err != nil { + return 0, err + } + hexString := strings.TrimSpace(string(data)) + if !strings.HasPrefix(hexString, "0x") { + return 0, errors.New("invalid format: hex string does not start with '0x'") + } + return strconv.ParseUint(hexString[2:], 16, 64) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index 1ab875ceec..d5404a6d72 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -20,6 +20,8 @@ package util import ( "bytes" "os" + "strconv" + "strings" "syscall" ) @@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) { return string(bytes.TrimSpace(b[:n])), nil } + +// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it. 
+func SysReadUintFromFile(path string) (uint64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// SysReadIntFromFile reads a file using SysReadFile and attempts to parse an int64 from it. +func SysReadIntFromFile(path string) (int64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 75a3b6c810..50caa73274 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -45,11 +45,11 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 - // kernel version >= 4.14 MaxLen + // Kernel version >= 4.14 MaxLen // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 fieldTransport11RDMAMaxLen = 28 - // kernel version <= 4.2 MinLen + // Kernel version <= 4.2 MinLen // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 fieldTransport11RDMAMinLen = 20 ) @@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats switch statVersion { case statVersion10: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport10UDPLen - } else { + default: return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { @@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } case statVersion11: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport11UDPLen - } else if protocol == "rdma" { + case "rdma": expectedLength = fieldTransport11RDMAMinLen - } else { + default: return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || @@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // For the udp RPC transport there is no connection count, connect idle time, // or idle time (fields #3, #4, and #5); all other fields are the same. So // we set them to 0 here. - if protocol == "udp" { + switch protocol { + case "udp": ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } else if protocol == "tcp" { + case "tcp": ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) - } else if protocol == "rdma" { + case "rdma": ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) } diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go new file mode 100644 index 0000000000..f50b38e352 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
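The read-and-parse helpers added above (`ReadHexFromFile` in internal/util/parse.go, plus `SysReadUintFromFile`/`SysReadIntFromFile` in sysreadfile.go) all share one read-trim-parse shape. They live under `internal/`, so downstream code cannot import them directly; a minimal standalone sketch of the same pattern (the sysfs path is only illustrative):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readUintFromFile mirrors the helpers' shape: read the whole file,
// trim surrounding whitespace, parse the remainder as base-10 uint64.
func readUintFromFile(path string) (uint64, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}

// readHexFromFile mirrors ReadHexFromFile: require a 0x prefix,
// then parse the rest as base-16.
func readHexFromFile(path string) (uint64, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	s := strings.TrimSpace(string(data))
	if !strings.HasPrefix(s, "0x") {
		return 0, fmt.Errorf("invalid format: %q does not start with 0x", s)
	}
	return strconv.ParseUint(s[2:], 16, 64)
}

func main() {
	// Illustrative target: any sysfs attribute holding a single number works.
	if mtu, err := readUintFromFile("/sys/class/net/lo/mtu"); err == nil {
		fmt.Println("lo MTU:", mtu)
	}
	_ = readHexFromFile // hex variant shown above for completeness
}
```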
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" +) + +// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/[pid]/net/dev_snmp6/. +// The outer map's keys are interface names and the inner map's keys are stat names. +// +// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type. +type NetDevSNMP6 map[string]map[string]uint64 + +// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/ +// directory. +func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6")) +} + +// Returns kernel/system statistics read from interface files within the /proc/[pid]/net/dev_snmp6/ +// directory. +func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(p.path("net/dev_snmp6")) +} + +// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory. +func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { + netDevSNMP6 := make(NetDevSNMP6) + + // The net/dev_snmp6 folders contain one file per interface + ifaceFiles, err := os.ReadDir(dir) + if err != nil { + // On systems with IPv6 disabled, this directory won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return netDevSNMP6, err + } + return netDevSNMP6, err + } + + for _, iFaceFile := range ifaceFiles { + f, err := os.Open(dir + "/" + iFaceFile.Name()) + if err != nil { + return netDevSNMP6, err + } + defer f.Close() + + netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f) + if err != nil { + return netDevSNMP6, err + } + } + + return netDevSNMP6, nil +} + +func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) { + m := make(map[string]uint64) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + key, val := stat[0], stat[1] + + // Expect stat name to contain "6" or be "ifIndex" + if strings.Contains(key, "6") || key == "ifIndex" { + v, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return m, err + } + + m[key] = v + } + } + return m, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index b70f1fc7a4..19e3378f72 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -25,7 +25,7 @@ import ( ) const ( - // readLimit is used by io.LimitReader while reading the content of the + // Maximum size limit used by io.LimitReader while reading the content of the // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic // as each line represents a single used socket. // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. @@ -50,12 +50,12 @@ type ( // UsedSockets shows the total number of parsed lines representing the // number of used sockets. UsedSockets uint64 - // Drops shows the total number of dropped packets of all UPD sockets. + // Drops shows the total number of dropped packets of all UDP sockets.
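For reviewers wondering how the new `NetDevSNMP6` accessor above is meant to be consumed, a short sketch (it assumes this vendored procfs version and a standard /proc mount; the stat key is one of the names that appear in /proc/net/dev_snmp6 files):

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	// One inner map per file under /proc/net/dev_snmp6/, keyed by interface.
	stats, err := fs.NetDevSNMP6()
	if err != nil {
		// The directory does not exist when IPv6 is disabled on the host.
		fmt.Println("no dev_snmp6 stats:", err)
		return
	}
	for iface, kv := range stats {
		fmt.Println(iface, "Ip6InReceives:", kv["Ip6InReceives"])
	}
}
```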
Drops *uint64 } - // netIPSocketLine represents the fields parsed from a single line - // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // A single line parser for fields from /proc/net/{t,u}dp{,6}. + // Fields which are not used by IPSocket are skipped. // Drops is non-nil for udp{,6}, but nil for tcp{,6}. // For the proc file format details, see https://linux.die.net/man/5/proc. netIPSocketLine struct { diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index b6c77b709f..8d4b1ac05b 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro if err != nil { return nil, err } - if fields[4] == enabled { + switch fields[4] { + case enabled: line.Pressure = 1 - } else if fields[4] == disabled { + case disabled: line.Pressure = 0 - } else { + default: line.Pressure = -1 } line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) if err != nil { return nil, err } - if fields[6] == enabled { + switch fields[6] { + case enabled: line.Slab = true - } else if fields[6] == disabled { + case disabled: line.Slab = false - } else { + default: return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } for i := 0; i < len(capabilities); i++ { - if capabilities[i] == "y" { + switch capabilities[i] { + case "y": *capabilityFields[i] = true - } else if capabilities[i] == "n" { + case "n": *capabilityFields[i] = false - } else { + default: return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go index 5277629557..0396d72015 100644 --- a/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -25,24 +25,28 @@ type ( // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCP() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp")) } // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp6")) } // NetTCPSummary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp")) } // NetTCP6Summary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. 
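The Deprecated notes above steer new code toward netlink sockets instead of scraping /proc/net/tcp, which the kernel renders in full on every read. The procfs calls still work; a sketch of the summary variant (assuming this vendored version; `UsedSockets` is the field documented in net_ip_socket.go above):

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	// Deprecated in this version in favor of github.com/mdlayher/netlink,
	// but still functional.
	summary, err := fs.NetTCPSummary()
	if err != nil {
		panic(err)
	}
	fmt.Println("TCP sockets in use:", summary.UsedSockets)
}
```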
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp6")) } diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index d868cebdaa..d7e0cacb4c 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { return &nu, nil } -func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { +func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) { fields := strings.Fields(line) l := len(fields) - if l < min { - return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) + if l < minFields { + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l) } // Field offsets are as follows: @@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, } // Path field is optional. - if l > min { + if l > minFields { // Path occurs at either index 6 or 7 depending on whether inode is // already present. pathIdx := 7 diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 142796368f..368187fa88 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -37,9 +37,9 @@ type Proc struct { type Procs []Proc var ( - ErrFileParse = errors.New("Error Parsing File") - ErrFileRead = errors.New("Error Reading File") - ErrMountPoint = errors.New("Error Accessing Mount point") + ErrFileParse = errors.New("error parsing file") + ErrFileRead = errors.New("error reading file") + ErrMountPoint = errors.New("error accessing mount point") ) func (p Procs) Len() int { return len(p) } @@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) { if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), "")) if err != nil { return Proc{}, err } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index daeed7f571..4a64347c03 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -24,7 +24,7 @@ import ( ) // Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a -specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in this hierarchy' (where==what path on the specific cgroupfs).
By prefixing this path with the mount point of diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index 776f349717..d15b66ddb6 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) { ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" + "cancelled_write_bytes: %d\n" //nolint:misspell _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 8e3ff4d794..4248c1716e 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = &value + procNetstat.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = &value + procNetstat.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = &value + procNetstat.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = &value + procNetstat.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = &value + procNetstat.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = &value + procNetstat.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = &value + procNetstat.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = &value + procNetstat.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = &value + procNetstat.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = &value + procNetstat.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = &value + procNetstat.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = &value + procNetstat.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = &value + procNetstat.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = &value + procNetstat.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = &value + procNetstat.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = &value + procNetstat.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = &value + procNetstat.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = &value + procNetstat.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = &value + procNetstat.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = &value + procNetstat.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = &value + procNetstat.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = &value + procNetstat.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = &value + procNetstat.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = &value + procNetstat.TCPRenoRecovery = &value case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = &value + procNetstat.TCPSackRecovery = 
&value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = &value + procNetstat.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = &value + procNetstat.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = &value + procNetstat.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = &value + procNetstat.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = &value + procNetstat.TCPFullUndo = &value case "TCPPartialUndo": - procNetstat.TcpExt.TCPPartialUndo = &value + procNetstat.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = &value + procNetstat.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = &value + procNetstat.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = &value + procNetstat.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = &value + procNetstat.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = &value + procNetstat.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = &value + procNetstat.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = &value + procNetstat.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = &value + procNetstat.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = &value + procNetstat.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = &value + procNetstat.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = &value + procNetstat.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = &value + procNetstat.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = &value + procNetstat.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = &value + procNetstat.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = &value + procNetstat.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = &value + procNetstat.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = &value + procNetstat.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = &value + procNetstat.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = &value + procNetstat.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = &value + procNetstat.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = &value + procNetstat.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = &value + procNetstat.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = &value + procNetstat.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = &value + procNetstat.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = &value + procNetstat.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = &value + procNetstat.TCPRetransFail = &value case "TCPRcvCoalesce": - 
procNetstat.TcpExt.TCPRcvCoalesce = &value + procNetstat.TCPRcvCoalesce = &value case "TCPRcvQDrop": - procNetstat.TcpExt.TCPRcvQDrop = &value + procNetstat.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = &value + procNetstat.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = &value + procNetstat.TCPOFODrop = &value case "TCPOFOMerge": - procNetstat.TcpExt.TCPOFOMerge = &value + procNetstat.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = &value + procNetstat.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = &value + procNetstat.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = &value + procNetstat.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = &value + procNetstat.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = &value + procNetstat.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = &value + procNetstat.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = &value + procNetstat.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = &value + procNetstat.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = &value + procNetstat.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value + procNetstat.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = &value + procNetstat.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = &value + procNetstat.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = &value + procNetstat.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = &value + procNetstat.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = &value + procNetstat.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = &value + procNetstat.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = &value + procNetstat.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = &value + procNetstat.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = &value + procNetstat.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = &value + procNetstat.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = &value + procNetstat.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = &value + procNetstat.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = &value + procNetstat.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = &value + procNetstat.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value + procNetstat.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = &value 
+ procNetstat.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = &value + procNetstat.TCPACKSkippedChallenge = &value case "TCPWinProbe": - procNetstat.TcpExt.TCPWinProbe = &value + procNetstat.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = &value + procNetstat.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = &value + procNetstat.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = &value + procNetstat.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = &value + procNetstat.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = &value + procNetstat.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = &value + procNetstat.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = &value + procNetstat.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = &value + procNetstat.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = &value + procNetstat.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = &value + procNetstat.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = &value + procNetstat.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = &value + procNetstat.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = &value + procNetstat.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = &value + procNetstat.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = &value + procNetstat.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = &value + procNetstat.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = &value + procNetstat.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = &value + procNetstat.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = &value + procNetstat.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = &value + procNetstat.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = &value + procNetstat.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = &value + procNetstat.ReasmOverlaps = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index 09060e8208..9a297afcf8 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -19,7 +19,6 @@ package procfs import ( "bufio" "errors" - "fmt" "os" "regexp" "strconv" @@ -29,7 +28,7 @@ import ( ) var ( - // match the header line before each mapped zone in `/proc/pid/smaps`. + // Match the header line before each mapped zone in `/proc/pid/smaps`. 
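The newly capitalized comment above documents `procSMapsHeaderLine`, the pattern that tells zone headers apart from per-zone stat lines when parsing /proc/[pid]/smaps. A standalone check of what that pattern accepts (sample lines are illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as procSMapsHeaderLine: zone headers start with a
	// lowercase hex address such as "00400000-00452000 r-xp ...".
	header := regexp.MustCompile(`^[a-f0-9].*$`)

	for _, line := range []string{
		"00400000-00452000 r-xp 00000000 08:02 173521", // zone header: matches
		"Rss:                 128 kB",                  // stat line: no match
	} {
		fmt.Printf("%q header=%v\n", line, header.MatchString(line))
	}
}
```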
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) @@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { func (s *ProcSMapsRollup) parseLine(line string) error { kv := strings.SplitN(line, ":", 2) if len(kv) != 2 { - fmt.Println(line) return errors.New("invalid net/dev line, missing colon") } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index b9d2cf642a..4bdc90b07e 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = &value + procSnmp.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = &value + procSnmp.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = &value + procSnmp.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = &value + procSnmp.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = &value + procSnmp.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = &value + procSnmp.ForwDatagrams = &value case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = &value + procSnmp.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = &value + procSnmp.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = &value + procSnmp.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = &value + procSnmp.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = &value + procSnmp.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = &value + procSnmp.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = &value + procSnmp.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = &value + procSnmp.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = &value + procSnmp.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = &value + procSnmp.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = &value + procSnmp.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = &value + procSnmp.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = &value + procSnmp.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = &value + procSnmp.InMsgs = &value case "InErrors": procSnmp.Icmp.InErrors = &value case "InCsumErrors": procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = &value + procSnmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = &value + procSnmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = &value + procSnmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = &value + procSnmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = &value + procSnmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = &value + procSnmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = &value + procSnmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = &value + procSnmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = &value + procSnmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = &value + procSnmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = &value + procSnmp.InAddrMaskReps = &value case 
"OutMsgs": - procSnmp.Icmp.OutMsgs = &value + procSnmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = &value + procSnmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = &value + procSnmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = &value + procSnmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = &value + procSnmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = &value + procSnmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = &value + procSnmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = &value + procSnmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = &value + procSnmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = &value + procSnmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = &value + procSnmp.OutTimestampReps = &value case "OutAddrMasks": - procSnmp.Icmp.OutAddrMasks = &value + procSnmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = &value + procSnmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = &value + procSnmp.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = &value + procSnmp.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = &value + procSnmp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = &value + procSnmp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = &value + procSnmp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = &value + procSnmp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = &value + procSnmp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = &value + procSnmp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = &value + procSnmp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = &value + procSnmp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = &value + procSnmp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = &value + procSnmp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = &value + procSnmp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = &value + procSnmp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = &value + procSnmp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = &value + procSnmp.OutRsts = &value case "InCsumErrors": procSnmp.Tcp.InCsumErrors = &value } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index 3059cc6a13..fb7fd3995b 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = &value + procSnmp6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = &value + procSnmp6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = &value + procSnmp6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = &value + procSnmp6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = &value + procSnmp6.InAddrErrors = &value case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = &value + procSnmp6.InUnknownProtos = &value case "InTruncatedPkts": 
- procSnmp6.Ip6.InTruncatedPkts = &value + procSnmp6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = &value + procSnmp6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = &value + procSnmp6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = &value + procSnmp6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = &value + procSnmp6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = &value + procSnmp6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = &value + procSnmp6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = &value + procSnmp6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = &value + procSnmp6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = &value + procSnmp6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = &value + procSnmp6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = &value + procSnmp6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = &value + procSnmp6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = &value + procSnmp6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = &value + procSnmp6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = &value + procSnmp6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = &value + procSnmp6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = &value + procSnmp6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = &value + procSnmp6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = &value + procSnmp6.OutMcastOctets = &value case "InBcastOctets": - procSnmp6.Ip6.InBcastOctets = &value + procSnmp6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = &value + procSnmp6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = &value + procSnmp6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = &value + procSnmp6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = &value + procSnmp6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = &value + procSnmp6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = &value + procSnmp6.InMsgs = &value case "InErrors": procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = &value + procSnmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = &value + procSnmp6.OutErrors = &value case "InCsumErrors": procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = &value + procSnmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = &value + procSnmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = &value + procSnmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = &value + procSnmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = &value + procSnmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = &value + procSnmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = &value + procSnmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = &value + procSnmp6.InGroupMembResponses = &value case "InGroupMembReductions": - 
procSnmp6.Icmp6.InGroupMembReductions = &value + procSnmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = &value + procSnmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = &value + procSnmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = &value + procSnmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = &value + procSnmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = &value + procSnmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = &value + procSnmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = &value + procSnmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = &value + procSnmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = &value + procSnmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = &value + procSnmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = &value + procSnmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = &value + procSnmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = &value + procSnmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = &value + procSnmp6.OutGroupMembResponses = &value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = &value + procSnmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = &value + procSnmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = &value + procSnmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = &value + procSnmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = &value + procSnmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = &value + procSnmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = &value + procSnmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = &value + procSnmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = &value + procSnmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = &value + procSnmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = &value + procSnmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = &value + procSnmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = &value + procSnmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = &value + procSnmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = &value + procSnmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = &value + procSnmp6.OutType143 = &value } case "Udp6": switch key { @@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "InCsumErrors": procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = &value + procSnmp6.IgnoredMulti = &value } case "UdpLite6": switch key { diff --git a/vendor/github.com/prometheus/procfs/proc_status.go 
b/vendor/github.com/prometheus/procfs/proc_status.go index a055197c63..dd8aa56885 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt } } case "NSpid": - s.NSpids = calcNSPidsList(vString) + nspids, err := calcNSPidsList(vString) + if err != nil { + return err + } + s.NSpids = nspids case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 { return g } -func calcNSPidsList(nspidsString string) []uint64 { - s := strings.Split(nspidsString, " ") +func calcNSPidsList(nspidsString string) ([]uint64, error) { + s := strings.Split(nspidsString, "\t") var nspids []uint64 for _, nspid := range s { - nspid, _ := strconv.ParseUint(nspid, 10, 64) - if nspid == 0 { - continue + nspid, err := strconv.ParseUint(nspid, 10, 64) + if err != nil { + return nil, err } nspids = append(nspids, nspid) } - return nspids + return nspids, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index 5eefbe2ef8..3810d1ac99 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -21,7 +21,7 @@ import ( ) func sysctlToPath(sysctl string) string { - return strings.Replace(sysctl, ".", "/", -1) + return strings.ReplaceAll(sysctl, ".", "/") } func (fs FS) SysctlStrings(sysctl string) ([]string, error) { diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index 28708e0745..403e6ae708 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { if len(parts) < 2 { continue } - switch { - case parts[0] == "HI:": + switch parts[0] { + case "HI:": perCPU := parts[1:] softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TIMER:": + case "TIMER:": perCPU := parts[1:] softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_TX:": + case "NET_TX:": perCPU := parts[1:] softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_RX:": + case "NET_RX:": perCPU := parts[1:] softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "BLOCK:": + case "BLOCK:": perCPU := parts[1:] softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "IRQ_POLL:": + case 
"IRQ_POLL:": perCPU := parts[1:] softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TASKLET:": + case "TASKLET:": perCPU := parts[1:] softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "SCHED:": + case "SCHED:": perCPU := parts[1:] softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "HRTIMER:": + case "HRTIMER:": perCPU := parts[1:] softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "RCU:": + case "RCU:": perCPU := parts[1:] softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md index 7eacc5bdbe..388c4e5ead 100644 --- a/vendor/github.com/spf13/pflag/README.md +++ b/vendor/github.com/spf13/pflag/README.md @@ -284,6 +284,33 @@ func main() { } ``` +### Using pflag with go test +`pflag` does not parse the shorthand versions of go test's built-in flags (i.e., those starting with `-test.`). +For more context, see issues [#63](https://github.com/spf13/pflag/issues/63) and [#238](https://github.com/spf13/pflag/issues/238) for more details. + +For example, if you use pflag in your `TestMain` function and call `pflag.Parse()` after defining your custom flags, running a test like this: +```bash +go test /your/tests -run ^YourTest -v --your-test-pflags +``` +will result in the `-v` flag being ignored. This happens because of the way pflag handles flag parsing, skipping over go test's built-in shorthand flags. +To work around this, you can use the `ParseSkippedFlags` function, which ensures that go test's flags are parsed separately using the standard flag package. 
+ +**Example**: You want to parse go test flags that are otherwise ignored by `pflag.Parse()` +```go +import ( + goflag "flag" + "os" + flag "github.com/spf13/pflag" +) + +var ip *int = flag.Int("flagname", 1234, "help message for flagname") + +func main() { + flag.CommandLine.AddGoFlagSet(goflag.CommandLine) + flag.ParseSkippedFlags(os.Args[1:], goflag.CommandLine) + flag.Parse() +} +``` + ## More info You can see the full reference documentation of the pflag package diff --git a/vendor/github.com/spf13/pflag/bool_func.go b/vendor/github.com/spf13/pflag/bool_func.go new file mode 100644 index 0000000000..83d77afa89 --- /dev/null +++ b/vendor/github.com/spf13/pflag/bool_func.go @@ -0,0 +1,40 @@ +package pflag + +// -- func Value +type boolfuncValue func(string) error + +func (f boolfuncValue) Set(s string) error { return f(s) } + +func (f boolfuncValue) Type() string { return "boolfunc" } + +func (f boolfuncValue) String() string { return "" } // same behavior as stdlib 'flag' package + +func (f boolfuncValue) IsBoolFlag() bool { return true } + +// BoolFunc defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed +// on the command line. +func (f *FlagSet) BoolFunc(name string, usage string, fn func(string) error) { + f.BoolFuncP(name, "", usage, fn) +} + +// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolFuncP(name, shorthand string, usage string, fn func(string) error) { + var val Value = boolfuncValue(fn) + flag := f.VarPF(val, name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// BoolFunc defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed +// on the command line. +func BoolFunc(name string, usage string, fn func(string) error) { + CommandLine.BoolFuncP(name, "", usage, fn) +} + +// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash. +func BoolFuncP(name, shorthand string, usage string, fn func(string) error) { + CommandLine.BoolFuncP(name, shorthand, usage, fn) +} diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go index a0b2679f71..d49c0143c1 100644 --- a/vendor/github.com/spf13/pflag/count.go +++ b/vendor/github.com/spf13/pflag/count.go @@ -85,7 +85,7 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int { // Count defines a count flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func Count(name string, usage string) *int { return CommandLine.CountP(name, "", usage) } diff --git a/vendor/github.com/spf13/pflag/errors.go b/vendor/github.com/spf13/pflag/errors.go new file mode 100644 index 0000000000..ff11b66bef --- /dev/null +++ b/vendor/github.com/spf13/pflag/errors.go @@ -0,0 +1,149 @@ +package pflag + +import "fmt" + +// notExistErrorMessageType specifies which flavor of "flag does not exist" +// is printed by NotExistError. This allows the related errors to be grouped +// under a single NotExistError struct without making a breaking change to +// the error message text.
+type notExistErrorMessageType int + +const ( + flagNotExistMessage notExistErrorMessageType = iota + flagNotDefinedMessage + flagNoSuchFlagMessage + flagUnknownFlagMessage + flagUnknownShorthandFlagMessage +) + +// NotExistError is the error returned when trying to access a flag that +// does not exist in the FlagSet. +type NotExistError struct { + name string + specifiedShorthands string + messageType notExistErrorMessageType +} + +// Error implements error. +func (e *NotExistError) Error() string { + switch e.messageType { + case flagNotExistMessage: + return fmt.Sprintf("flag %q does not exist", e.name) + + case flagNotDefinedMessage: + return fmt.Sprintf("flag accessed but not defined: %s", e.name) + + case flagNoSuchFlagMessage: + return fmt.Sprintf("no such flag -%v", e.name) + + case flagUnknownFlagMessage: + return fmt.Sprintf("unknown flag: --%s", e.name) + + case flagUnknownShorthandFlagMessage: + c := rune(e.name[0]) + return fmt.Sprintf("unknown shorthand flag: %q in -%s", c, e.specifiedShorthands) + } + + panic(fmt.Errorf("unknown flagNotExistErrorMessageType: %v", e.messageType)) +} + +// GetSpecifiedName returns the name of the flag (without dashes) as it +// appeared in the parsed arguments. +func (e *NotExistError) GetSpecifiedName() string { + return e.name +} + +// GetSpecifiedShortnames returns the group of shorthand arguments +// (without dashes) that the flag appeared within. If the flag was not in a +// shorthand group, this will return an empty string. +func (e *NotExistError) GetSpecifiedShortnames() string { + return e.specifiedShorthands +} + +// ValueRequiredError is the error returned when a flag needs an argument but +// no argument was provided. +type ValueRequiredError struct { + flag *Flag + specifiedName string + specifiedShorthands string +} + +// Error implements error. +func (e *ValueRequiredError) Error() string { + if len(e.specifiedShorthands) > 0 { + c := rune(e.specifiedName[0]) + return fmt.Sprintf("flag needs an argument: %q in -%s", c, e.specifiedShorthands) + } + + return fmt.Sprintf("flag needs an argument: --%s", e.specifiedName) +} + +// GetFlag returns the flag for which the error occurred. +func (e *ValueRequiredError) GetFlag() *Flag { + return e.flag +} + +// GetSpecifiedName returns the name of the flag (without dashes) as it +// appeared in the parsed arguments. +func (e *ValueRequiredError) GetSpecifiedName() string { + return e.specifiedName +} + +// GetSpecifiedShortnames returns the group of shorthand arguments +// (without dashes) that the flag appeared within. If the flag was not in a +// shorthand group, this will return an empty string. +func (e *ValueRequiredError) GetSpecifiedShortnames() string { + return e.specifiedShorthands +} + +// InvalidValueError is the error returned when an invalid value is used +// for a flag. +type InvalidValueError struct { + flag *Flag + value string + cause error +} + +// Error implements error. +func (e *InvalidValueError) Error() string { + flag := e.flag + var flagName string + if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { + flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) + } else { + flagName = fmt.Sprintf("--%s", flag.Name) + } + return fmt.Sprintf("invalid argument %q for %q flag: %v", e.value, flagName, e.cause) +} + +// Unwrap implements errors.Unwrap. +func (e *InvalidValueError) Unwrap() error { + return e.cause +} + +// GetFlag returns the flag for which the error occurred. 
+func (e *InvalidValueError) GetFlag() *Flag { + return e.flag +} + +// GetValue returns the invalid value that was provided. +func (e *InvalidValueError) GetValue() string { + return e.value +} + +// InvalidSyntaxError is the error returned when a bad flag name is passed on +// the command line. +type InvalidSyntaxError struct { + specifiedFlag string +} + +// Error implements error. +func (e *InvalidSyntaxError) Error() string { + return fmt.Sprintf("bad flag syntax: %s", e.specifiedFlag) +} + +// GetSpecifiedName returns the exact flag (with dashes) as it +// appeared in the parsed arguments. +func (e *InvalidSyntaxError) GetSpecifiedFlag() string { + return e.specifiedFlag +} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 7c058de374..2fd3c57597 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -27,23 +27,32 @@ unaffected. Define flags using flag.String(), Bool(), Int(), etc. This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + var ip = flag.Int("flagname", 1234, "help message for flagname") + If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int func init() { flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") } + Or you can create custom flags that satisfy the Value interface (with pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") + For such flags, the default value is just the initial value of the variable. After all flags are defined, call + flag.Parse() + to parse the command line into the defined flags. Flags may then be used directly. If you're using the flags themselves, they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) @@ -54,22 +63,26 @@ The arguments are indexed from 0 through flag.NArg()-1. The pflag package also defines some new functions that are not in flag, that give one-letter shorthands for flags. You can use these by appending 'P' to the name of any function that defines a flag. + var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } flag.VarP(&flagval, "varname", "v", "help message") + Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags. Command line flag syntax: + --flag // boolean flags only --flag=x Unlike the flag package, a single dash before an option means something different than a double dash. Single dashes signify a series of shorthand letters for flags. All but the last shorthand letter must be boolean flags. + // boolean flags -f -abc @@ -124,12 +137,17 @@ const ( PanicOnError ) -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { +// ParseErrorsAllowlist defines the parsing errors that can be ignored +type ParseErrorsAllowlist struct { // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags UnknownFlags bool } +// ParseErrorsWhitelist defines the parsing errors that can be ignored. +// +// Deprecated: use [ParseErrorsAllowlist] instead. This type will be removed in a future release. +type ParseErrorsWhitelist = ParseErrorsAllowlist + // NormalizedName is a flag name that has been normalized according to rules // for the FlagSet (e.g. 
making '-' and '_' equivalent). type NormalizedName string @@ -145,8 +163,13 @@ type FlagSet struct { // help/usage messages. SortFlags bool - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist + // ParseErrorsAllowlist is used to configure an allowlist of errors + ParseErrorsAllowlist ParseErrorsAllowlist + + // ParseErrorsAllowlist is used to configure an allowlist of errors. + // + // Deprecated: use [FlagSet.ParseErrorsAllowlist] instead. This field will be removed in a future release. + ParseErrorsWhitelist ParseErrorsAllowlist name string parsed bool @@ -381,7 +404,7 @@ func (f *FlagSet) lookup(name NormalizedName) *Flag { func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { flag := f.Lookup(name) if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s", name) + err := &NotExistError{name: name, messageType: flagNotDefinedMessage} return nil, err } @@ -411,7 +434,7 @@ func (f *FlagSet) ArgsLenAtDash() int { func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) @@ -427,7 +450,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) @@ -441,7 +464,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro func (f *FlagSet) MarkHidden(name string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } flag.Hidden = true return nil @@ -464,18 +487,16 @@ func (f *FlagSet) Set(name, value string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] if !ok { - return fmt.Errorf("no such flag -%v", name) + return &NotExistError{name: name, messageType: flagNoSuchFlagMessage} } err := flag.Value.Set(value) if err != nil { - var flagName string - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) - } else { - flagName = fmt.Sprintf("--%s", flag.Name) + return &InvalidValueError{ + flag: flag, + value: value, + cause: err, } - return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) } if !flag.Changed { @@ -501,7 +522,7 @@ func (f *FlagSet) SetAnnotation(name, key string, values []string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] if !ok { - return fmt.Errorf("no such flag -%v", name) + return &NotExistError{name: name, messageType: flagNoSuchFlagMessage} } if flag.Annotations == nil { flag.Annotations = map[string][]string{} @@ -538,7 +559,7 @@ func (f *FlagSet) PrintDefaults() { func (f *Flag) defaultIsZeroValue() bool { switch f.Value.(type) { case boolFlag: - return f.DefValue == "false" + return f.DefValue == "false" || f.DefValue == "" case *durationValue: // Beginning in Go 1.7, duration zero values are 
"0s" return f.DefValue == "0" || f.DefValue == "0s" @@ -551,7 +572,7 @@ func (f *Flag) defaultIsZeroValue() bool { case *intSliceValue, *stringSliceValue, *stringArrayValue: return f.DefValue == "[]" default: - switch f.Value.String() { + switch f.DefValue { case "false": return true case "": @@ -588,8 +609,10 @@ func UnquoteUsage(flag *Flag) (name string, usage string) { name = flag.Value.Type() switch name { - case "bool": + case "bool", "boolfunc": name = "" + case "func": + name = "value" case "float64": name = "float" case "int64": @@ -707,7 +730,7 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { switch flag.Value.Type() { case "string": line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": + case "bool", "boolfunc": if flag.NoOptDefVal != "true" { line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) } @@ -911,12 +934,10 @@ func VarP(value Value, name, shorthand, usage string) { CommandLine.VarP(value, name, shorthand, usage) } -// failf prints to standard error a formatted error and usage message and +// fail prints an error message and usage message to standard error and // returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) +func (f *FlagSet) fail(err error) error { if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -934,9 +955,9 @@ func (f *FlagSet) usage() { } } -//--unknown (args will be empty) -//--unknown --next-flag ... (args will be --next-flag ...) -//--unknown arg ... (args will be arg ...) +// --unknown (args will be empty) +// --unknown --next-flag ... (args will be --next-flag ...) +// --unknown arg ... (args will be arg ...) func stripUnknownFlagValue(args []string) []string { if len(args) == 0 { //--unknown @@ -960,7 +981,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin a = args name := s[2:] if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) + err = f.fail(&InvalidSyntaxError{specifiedFlag: s}) return } @@ -974,6 +995,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin f.usage() return a, ErrHelp case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // --unknown=unknownval arg ... 
// we do not want to lose arg in this case if len(split) >= 2 { @@ -982,7 +1005,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin return stripUnknownFlagValue(a), nil default: - err = f.failf("unknown flag: --%s", name) + err = f.fail(&NotExistError{name: name, messageType: flagUnknownFlagMessage}) return } } @@ -1000,13 +1023,16 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin a = a[1:] } else { // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) + err = f.fail(&ValueRequiredError{ + flag: flag, + specifiedName: name, + }) return } err = fn(flag, value) if err != nil { - f.failf(err.Error()) + f.fail(err) } return } @@ -1014,7 +1040,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { outArgs = args - if strings.HasPrefix(shorthands, "test.") { + if isGotestShorthandFlag(shorthands) { return } @@ -1029,6 +1055,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = ErrHelp return case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // '-f=arg arg ...' // we do not want to lose arg in this case if len(shorthands) > 2 && shorthands[1] == '=' { @@ -1039,7 +1067,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse outArgs = stripUnknownFlagValue(outArgs) return default: - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + err = f.fail(&NotExistError{ + name: string(c), + specifiedShorthands: shorthands, + messageType: flagUnknownShorthandFlagMessage, + }) return } } @@ -1062,7 +1094,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse outArgs = args[1:] } else { // '-f' (arg was required) - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + err = f.fail(&ValueRequiredError{ + flag: flag, + specifiedName: string(c), + specifiedShorthands: shorthands, + }) return } @@ -1072,7 +1108,7 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = fn(flag, value) if err != nil { - f.failf(err.Error()) + f.fail(err) } return } @@ -1135,12 +1171,12 @@ func (f *FlagSet) Parse(arguments []string) error { } f.parsed = true - if len(arguments) < 0 { + f.args = make([]string, 0, len(arguments)) + + if len(arguments) == 0 { return nil } - f.args = make([]string, 0, len(arguments)) - set := func(flag *Flag, value string) error { return f.Set(flag.Name, value) } @@ -1151,7 +1187,10 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - fmt.Println(err) + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) @@ -1177,6 +1216,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) case ContinueOnError: return err case ExitOnError: + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) diff --git a/vendor/github.com/spf13/pflag/func.go b/vendor/github.com/spf13/pflag/func.go new file mode 100644 index 0000000000..9f4d88f271 --- /dev/null +++ b/vendor/github.com/spf13/pflag/func.go @@ -0,0 +1,37 @@ +package pflag + +// -- func Value +type funcValue func(string) error + +func (f funcValue) Set(s string) error { return f(s) } + +func (f funcValue) 
Type() string { return "func" } + +func (f funcValue) String() string { return "" } // same behavior as stdlib 'flag' package + +// Func defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}={value}" (or equivalent) is +// parsed on the command line, with "{value}" as an argument. +func (f *FlagSet) Func(name string, usage string, fn func(string) error) { + f.FuncP(name, "", usage, fn) +} + +// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) FuncP(name string, shorthand string, usage string, fn func(string) error) { + var val Value = funcValue(fn) + f.VarP(val, name, shorthand, usage) +} + +// Func defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}={value}" (or equivalent) is +// parsed on the command line, with "{value}" as an argument. +func Func(name string, usage string, fn func(string) error) { + CommandLine.FuncP(name, "", usage, fn) +} + +// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash. +func FuncP(name, shorthand string, usage string, fn func(string) error) { + CommandLine.FuncP(name, shorthand, usage, fn) +} diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go index d3dd72b7fe..e62eab5381 100644 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -8,8 +8,18 @@ import ( goflag "flag" "reflect" "strings" + "time" ) +// go test flags prefixes +func isGotestFlag(flag string) bool { + return strings.HasPrefix(flag, "-test.") +} + +func isGotestShorthandFlag(flag string) bool { + return strings.HasPrefix(flag, "test.") +} + // flagValueWrapper implements pflag.Value around a flag.Value. The main // difference here is the addition of the Type method that returns a string // name of the type. As this is generally unknown, we approximate that with @@ -103,3 +113,49 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { } f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) } + +// CopyToGoFlagSet will add all current flags to the given Go flag set. +// Deprecation remarks get copied into the usage description. +// Whenever possible, a flag gets added for which Go flags shows +// a proper type in the help message. +func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) { + f.VisitAll(func(flag *Flag) { + usage := flag.Usage + if flag.Deprecated != "" { + usage += " (DEPRECATED: " + flag.Deprecated + ")" + } + + switch value := flag.Value.(type) { + case *stringValue: + newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage) + case *intValue: + newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage) + case *int64Value: + newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage) + case *uintValue: + newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage) + case *uint64Value: + newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage) + case *durationValue: + newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage) + case *float64Value: + newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage) + default: + newSet.Var(flag.Value, flag.Name, usage) + } + }) +} + +// ParseSkippedFlags explicitly Parses go test flags (i.e. 
the ones starting with '-test.') with goflag.Parse(),
+// since by default those are skipped by pflag.Parse().
+// Typical usage example: `ParseSkippedFlags(os.Args[1:], goflag.CommandLine)`
+func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error {
+	var skippedFlags []string
+	for _, f := range osArgs {
+		if isGotestFlag(f) {
+			skippedFlags = append(skippedFlags, f)
+		}
+	}
+	return goFlagSet.Parse(skippedFlags)
+}
+
diff --git a/vendor/github.com/spf13/pflag/ipnet_slice.go b/vendor/github.com/spf13/pflag/ipnet_slice.go
index 6b541aa879..c6e89da18d 100644
--- a/vendor/github.com/spf13/pflag/ipnet_slice.go
+++ b/vendor/github.com/spf13/pflag/ipnet_slice.go
@@ -73,7 +73,7 @@ func (s *ipNetSliceValue) String() string {
 
 func ipNetSliceConv(val string) (interface{}, error) {
 	val = strings.Trim(val, "[]")
-	// Emtpy string would cause a slice with one (empty) entry
+	// Empty string would cause a slice with one (empty) entry
 	if len(val) == 0 {
 		return []net.IPNet{}, nil
 	}
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
index 890a01afc0..1d1e3bf91a 100644
--- a/vendor/github.com/spf13/pflag/string_to_string.go
+++ b/vendor/github.com/spf13/pflag/string_to_string.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/csv"
 	"fmt"
+	"sort"
 	"strings"
 )
 
@@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string {
 }
 
 func (s *stringToStringValue) String() string {
+	keys := make([]string, 0, len(*s.value))
+	for k := range *s.value {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
 	records := make([]string, 0, len(*s.value)>>1)
-	for k, v := range *s.value {
+	for _, k := range keys {
+		v := (*s.value)[k]
 		records = append(records, k+"="+v)
 	}
 
diff --git a/vendor/github.com/spf13/pflag/text.go b/vendor/github.com/spf13/pflag/text.go
new file mode 100644
index 0000000000..886d5a3d80
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/text.go
@@ -0,0 +1,81 @@
+package pflag
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+)
+
+// following is copied from go 1.23.4 flag.go
+type textValue struct{ p encoding.TextUnmarshaler }
+
+func newTextValue(val encoding.TextMarshaler, p encoding.TextUnmarshaler) textValue {
+	ptrVal := reflect.ValueOf(p)
+	if ptrVal.Kind() != reflect.Ptr {
+		panic("variable value type must be a pointer")
+	}
+	defVal := reflect.ValueOf(val)
+	if defVal.Kind() == reflect.Ptr {
+		defVal = defVal.Elem()
+	}
+	if defVal.Type() != ptrVal.Type().Elem() {
+		panic(fmt.Sprintf("default type does not match variable type: %v != %v", defVal.Type(), ptrVal.Type().Elem()))
+	}
+	ptrVal.Elem().Set(defVal)
+	return textValue{p}
+}
+
+func (v textValue) Set(s string) error {
+	return v.p.UnmarshalText([]byte(s))
+}
+
+func (v textValue) Get() interface{} {
+	return v.p
+}
+
+func (v textValue) String() string {
+	if m, ok := v.p.(encoding.TextMarshaler); ok {
+		if b, err := m.MarshalText(); err == nil {
+			return string(b)
+		}
+	}
+	return ""
+}
+
+//end of copy
+
+func (v textValue) Type() string {
+	return reflect.ValueOf(v.p).Type().Name()
+}
+
+// GetText sets out, which must implement encoding.TextUnmarshaler, to the value of the flag with the given name
+func (f *FlagSet) GetText(name string, out encoding.TextUnmarshaler) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag accessed but not defined: %s", name)
+	}
+	if flag.Value.Type() != reflect.TypeOf(out).Name() {
+		return fmt.Errorf("trying to get %s value of flag of type %s", reflect.TypeOf(out).Name(), flag.Value.Type())
+	}
+	return
out.UnmarshalText([]byte(flag.Value.String())) +} + +// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p. +func (f *FlagSet) TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) { + f.VarP(newTextValue(value, p), name, "", usage) +} + +// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) { + f.VarP(newTextValue(value, p), name, shorthand, usage) +} + +// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p. +func TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) { + CommandLine.VarP(newTextValue(value, p), name, "", usage) +} + +// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash. +func TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) { + CommandLine.VarP(newTextValue(value, p), name, shorthand, usage) +} diff --git a/vendor/github.com/spf13/pflag/time.go b/vendor/github.com/spf13/pflag/time.go new file mode 100644 index 0000000000..3dee424791 --- /dev/null +++ b/vendor/github.com/spf13/pflag/time.go @@ -0,0 +1,124 @@ +package pflag + +import ( + "fmt" + "strings" + "time" +) + +// TimeValue adapts time.Time for use as a flag. +type timeValue struct { + *time.Time + formats []string +} + +func newTimeValue(val time.Time, p *time.Time, formats []string) *timeValue { + *p = val + return &timeValue{ + Time: p, + formats: formats, + } +} + +// Set time.Time value from string based on accepted formats. +func (d *timeValue) Set(s string) error { + s = strings.TrimSpace(s) + for _, f := range d.formats { + v, err := time.Parse(f, s) + if err != nil { + continue + } + *d.Time = v + return nil + } + + formatsString := "" + for i, f := range d.formats { + if i > 0 { + formatsString += ", " + } + formatsString += fmt.Sprintf("`%s`", f) + } + + return fmt.Errorf("invalid time format `%s` must be one of: %s", s, formatsString) +} + +// Type name for time.Time flags. 
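+// (GetTime below checks this string before asserting the concrete *timeValue.)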
+func (d *timeValue) Type() string {
+	return "time"
+}
+
+func (d *timeValue) String() string {
+	if d.Time.IsZero() {
+		return ""
+	} else {
+		return d.Time.Format(time.RFC3339Nano)
+	}
+}
+
+// GetTime returns the time value of a flag with the given name
+func (f *FlagSet) GetTime(name string) (time.Time, error) {
+	flag := f.Lookup(name)
+	if flag == nil {
+		err := fmt.Errorf("flag accessed but not defined: %s", name)
+		return time.Time{}, err
+	}
+
+	if flag.Value.Type() != "time" {
+		err := fmt.Errorf("trying to get %s value of flag of type %s", "time", flag.Value.Type())
+		return time.Time{}, err
+	}
+
+	val, ok := flag.Value.(*timeValue)
+	if !ok {
+		return time.Time{}, fmt.Errorf("value %s is not a time", flag.Value)
+	}
+
+	return *val.Time, nil
+}
+
+// TimeVar defines a time.Time flag with specified name, default value, and usage string.
+// The argument p points to a time.Time variable in which to store the value of the flag.
+func (f *FlagSet) TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) {
+	f.TimeVarP(p, name, "", value, formats, usage)
+}
+
+// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) {
+	f.VarP(newTimeValue(value, p, formats), name, shorthand, usage)
+}
+
+// TimeVar defines a time.Time flag with specified name, default value, and usage string.
+// The argument p points to a time.Time variable in which to store the value of the flag.
+func TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) {
+	CommandLine.TimeVarP(p, name, "", value, formats, usage)
+}
+
+// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash.
+func TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) {
+	CommandLine.VarP(newTimeValue(value, p, formats), name, shorthand, usage)
+}
+
+// Time defines a time.Time flag with specified name, default value, and usage string.
+// The return value is the address of a time.Time variable that stores the value of the flag.
+func (f *FlagSet) Time(name string, value time.Time, formats []string, usage string) *time.Time {
+	return f.TimeP(name, "", value, formats, usage)
+}
+
+// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time {
+	p := new(time.Time)
+	f.TimeVarP(p, name, shorthand, value, formats, usage)
+	return p
+}
+
+// Time defines a time.Time flag with specified name, default value, and usage string.
+// The return value is the address of a time.Time variable that stores the value of the flag.
+func Time(name string, value time.Time, formats []string, usage string) *time.Time {
+	return CommandLine.TimeP(name, "", value, formats, usage)
+}
+
+// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash.
+func TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time { + return CommandLine.TimeP(name, shorthand, value, formats, usage) +} diff --git a/vendor/go.uber.org/automaxprocs/.codecov.yml b/vendor/go.uber.org/automaxprocs/.codecov.yml deleted file mode 100644 index 9a2ed4a996..0000000000 --- a/vendor/go.uber.org/automaxprocs/.codecov.yml +++ /dev/null @@ -1,14 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 90% # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure diff --git a/vendor/go.uber.org/automaxprocs/.gitignore b/vendor/go.uber.org/automaxprocs/.gitignore deleted file mode 100644 index dd7bcf5130..0000000000 --- a/vendor/go.uber.org/automaxprocs/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -vendor - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -*.pprof -*.out -*.log -coverage.txt - -/bin -cover.out -cover.html diff --git a/vendor/go.uber.org/automaxprocs/CHANGELOG.md b/vendor/go.uber.org/automaxprocs/CHANGELOG.md deleted file mode 100644 index f421056ae8..0000000000 --- a/vendor/go.uber.org/automaxprocs/CHANGELOG.md +++ /dev/null @@ -1,52 +0,0 @@ -# Changelog - -## v1.6.0 (2024-07-24) - -- Add RoundQuotaFunc option that allows configuration of rounding - behavior for floating point CPU quota. - -## v1.5.3 (2023-07-19) - -- Fix mountinfo parsing when super options have fields with spaces. -- Fix division by zero while parsing cgroups. - -## v1.5.2 (2023-03-16) - -- Support child control cgroups -- Fix file descriptor leak -- Update dependencies - -## v1.5.1 (2022-04-06) - -- Fix cgroups v2 mountpoint detection. - -## v1.5.0 (2022-04-05) - -- Add support for cgroups v2. - -Thanks to @emadolsky for their contribution to this release. - -## v1.4.0 (2021-02-01) - -- Support colons in cgroup names. -- Remove linters from runtime dependencies. - -## v1.3.0 (2020-01-23) - -- Migrate to Go modules. - -## v1.2.0 (2018-02-22) - -- Fixed quota clamping to always round down rather than up; Rather than - guaranteeing constant throttling at saturation, instead assume that the - fractional CPU was added as a hedge for factors outside of Go's scheduler. - -## v1.1.0 (2017-11-10) - -- Log the new value of `GOMAXPROCS` rather than the current value. -- Make logs more explicit about whether `GOMAXPROCS` was modified or not. -- Allow customization of the minimum `GOMAXPROCS`, and modify default from 2 to 1. - -## v1.0.0 (2017-08-09) - -- Initial release. 
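Before moving on: the `time.go` file added to pflag above introduces `Time`/`TimeVar` flags that try a caller-supplied list of layouts in order until one parses. A minimal usage sketch (the flag name and layouts are illustrative, not part of this diff):

```go
package main

import (
	"fmt"
	"time"

	flag "github.com/spf13/pflag"
)

func main() {
	// Accept either a full RFC 3339 timestamp or a bare date; the
	// formats are tried in order until one of them parses.
	since := flag.Time("since", time.Time{},
		[]string{time.RFC3339, "2006-01-02"}, "start of the reporting window")
	flag.Parse()

	if since.IsZero() {
		fmt.Println("no --since given")
		return
	}
	fmt.Println("window starts at", since.Format(time.RFC3339))
}
```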
diff --git a/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md b/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md deleted file mode 100644 index e327d9aa5c..0000000000 --- a/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,75 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, -body size, disability, ethnicity, gender identity and expression, level of -experience, nationality, personal appearance, race, religion, or sexual -identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an -appointed representative at an online or offline event. Representation of a -project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at oss-conduct@uber.com. The project -team will review and investigate all complaints, and will respond in a way -that it deems appropriate to the circumstances. The project team is obligated -to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 1.4, available at -[http://contributor-covenant.org/version/1/4][version]. 
- -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md b/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md deleted file mode 100644 index 2b6a6040d7..0000000000 --- a/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md +++ /dev/null @@ -1,81 +0,0 @@ -# Contributing - -We'd love your help improving this package! - -If you'd like to add new exported APIs, please [open an issue][open-issue] -describing your proposal — discussing API changes ahead of time makes -pull request review much smoother. In your issue, pull request, and any other -communications, please remember to treat your fellow contributors with -respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. - -Note that you'll need to sign [Uber's Contributor License Agreement][cla] -before we can accept any of your contributions. If necessary, a bot will remind -you to accept the CLA when you open your pull request. - -## Setup - -[Fork][fork], then clone the repository: - -``` -mkdir -p $GOPATH/src/go.uber.org -cd $GOPATH/src/go.uber.org -git clone git@github.com:your_github_username/automaxprocs.git -cd automaxprocs -git remote add upstream https://github.com/uber-go/automaxprocs.git -git fetch upstream -``` - -Install the test dependencies: - -``` -make dependencies -``` - -Make sure that the tests and the linters pass: - -``` -make test -make lint -``` - -If you're not using the minor version of Go specified in the Makefile's -`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is -fine, but it means that you'll only discover lint failures after you open your -pull request. - -## Making Changes - -Start by creating a new branch for your changes: - -``` -cd $GOPATH/src/go.uber.org/automaxprocs -git checkout master -git fetch upstream -git rebase upstream/master -git checkout -b cool_new_feature -``` - -Make your changes, then ensure that `make lint` and `make test` still pass. If -you're satisfied with your changes, push them to your fork. - -``` -git push origin cool_new_feature -``` - -Then use the GitHub UI to open a pull request. - -At this point, you're waiting on us to review your changes. We *try* to respond -to issues and pull requests within a few business days, and we may suggest some -improvements or alternatives. Once your changes are approved, one of the -project maintainers will merge them. - -We're much more likely to approve your changes if you: - -* Add tests for new functionality. -* Write a [good commit message][commit-message]. -* Maintain backward compatibility. - -[fork]: https://github.com/uber-go/automaxprocs/fork -[open-issue]: https://github.com/uber-go/automaxprocs/issues/new -[cla]: https://cla-assistant.io/uber-go/automaxprocs -[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/automaxprocs/Makefile b/vendor/go.uber.org/automaxprocs/Makefile deleted file mode 100644 index 1642b71480..0000000000 --- a/vendor/go.uber.org/automaxprocs/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -export GOBIN ?= $(shell pwd)/bin - -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) - -GOLINT = $(GOBIN)/golint -STATICCHECK = $(GOBIN)/staticcheck - -.PHONY: build -build: - go build ./... - -.PHONY: install -install: - go mod download - -.PHONY: test -test: - go test -race ./... 
- -.PHONY: cover -cover: - go test -coverprofile=cover.out -covermode=atomic -coverpkg=./... ./... - go tool cover -html=cover.out -o cover.html - -$(GOLINT): tools/go.mod - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): tools/go.mod - cd tools && go install honnef.co/go/tools/cmd/staticcheck@2023.1.2 - -.PHONY: lint -lint: $(GOLINT) $(STATICCHECK) - @rm -rf lint.log - @echo "Checking gofmt" - @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log - @echo "Checking go vet" - @go vet ./... 2>&1 | tee -a lint.log - @echo "Checking golint" - @$(GOLINT) ./... | tee -a lint.log - @echo "Checking staticcheck" - @$(STATICCHECK) ./... 2>&1 | tee -a lint.log - @echo "Checking for license headers..." - @./.build/check_license.sh | tee -a lint.log - @[ ! -s lint.log ] diff --git a/vendor/go.uber.org/automaxprocs/README.md b/vendor/go.uber.org/automaxprocs/README.md deleted file mode 100644 index bfed32adae..0000000000 --- a/vendor/go.uber.org/automaxprocs/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# automaxprocs [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -Automatically set `GOMAXPROCS` to match Linux container CPU quota. - -## Installation - -`go get -u go.uber.org/automaxprocs` - -## Quick Start - -```go -import _ "go.uber.org/automaxprocs" - -func main() { - // Your application logic here. -} -``` - -# Performance -Data measured from Uber's internal load balancer. We ran the load balancer with 200% CPU quota (i.e., 2 cores): - -| GOMAXPROCS | RPS | P50 (ms) | P99.9 (ms) | -| ------------------ | --------- | -------- | ---------- | -| 1 | 28,893.18 | 1.46 | 19.70 | -| 2 (equal to quota) | 44,715.07 | 0.84 | 26.38 | -| 3 | 44,212.93 | 0.66 | 30.07 | -| 4 | 41,071.15 | 0.57 | 42.94 | -| 8 | 33,111.69 | 0.43 | 64.32 | -| Default (24) | 22,191.40 | 0.45 | 76.19 | - -When `GOMAXPROCS` is increased above the CPU quota, we see P50 decrease slightly, but see significant increases to P99. We also see that the total RPS handled also decreases. - -When `GOMAXPROCS` is higher than the CPU quota allocated, we also saw significant throttling: - -``` -$ cat /sys/fs/cgroup/cpu,cpuacct/system.slice/[...]/cpu.stat -nr_periods 42227334 -nr_throttled 131923 -throttled_time 88613212216618 -``` - -Once `GOMAXPROCS` was reduced to match the CPU quota, we saw no CPU throttling. - -## Development Status: Stable - -All APIs are finalized, and no breaking changes will be made in the 1.x series -of releases. Users of semver-aware dependency management systems should pin -automaxprocs to `^1`. - -## Contributing - -We encourage and support an active, healthy community of contributors — -including you! Details are in the [contribution guide](CONTRIBUTING.md) and -the [code of conduct](CODE_OF_CONDUCT.md). The automaxprocs maintainers keep -an eye on issues and pull requests, but you can also report any negative -conduct to oss-conduct@uber.com. That email list is a private, safe space; -even the automaxprocs maintainers don't have access, so don't hesitate to hold -us to a high standard. - -
- -Released under the [MIT License](LICENSE). - -[doc-img]: https://godoc.org/go.uber.org/automaxprocs?status.svg -[doc]: https://godoc.org/go.uber.org/automaxprocs -[ci-img]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml/badge.svg -[ci]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml -[cov-img]: https://codecov.io/gh/uber-go/automaxprocs/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/automaxprocs - - diff --git a/vendor/go.uber.org/automaxprocs/automaxprocs.go b/vendor/go.uber.org/automaxprocs/automaxprocs.go deleted file mode 100644 index 69946a3e1f..0000000000 --- a/vendor/go.uber.org/automaxprocs/automaxprocs.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package automaxprocs automatically sets GOMAXPROCS to match the Linux -// container CPU quota, if any. -package automaxprocs // import "go.uber.org/automaxprocs" - -import ( - "log" - - "go.uber.org/automaxprocs/maxprocs" -) - -func init() { - maxprocs.Set(maxprocs.Logger(log.Printf)) -} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go deleted file mode 100644 index 113555f63d..0000000000 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package cgroups provides utilities to access Linux control group (CGroups) -// parameters (CPU quota, for example) for a given process. -package cgroups diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go deleted file mode 100644 index e561fe60b2..0000000000 --- a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to -// match the configured Linux CPU quota. Unlike the top-level automaxprocs -// package, it lets the caller configure logging and handle errors. -package maxprocs // import "go.uber.org/automaxprocs/maxprocs" - -import ( - "os" - "runtime" - - iruntime "go.uber.org/automaxprocs/internal/runtime" -) - -const _maxProcsKey = "GOMAXPROCS" - -func currentMaxProcs() int { - return runtime.GOMAXPROCS(0) -} - -type config struct { - printf func(string, ...interface{}) - procs func(int, func(v float64) int) (int, iruntime.CPUQuotaStatus, error) - minGOMAXPROCS int - roundQuotaFunc func(v float64) int -} - -func (c *config) log(fmt string, args ...interface{}) { - if c.printf != nil { - c.printf(fmt, args...) - } -} - -// An Option alters the behavior of Set. -type Option interface { - apply(*config) -} - -// Logger uses the supplied printf implementation for log output. By default, -// Set doesn't log anything. -func Logger(printf func(string, ...interface{})) Option { - return optionFunc(func(cfg *config) { - cfg.printf = printf - }) -} - -// Min sets the minimum GOMAXPROCS value that will be used. -// Any value below 1 is ignored. -func Min(n int) Option { - return optionFunc(func(cfg *config) { - if n >= 1 { - cfg.minGOMAXPROCS = n - } - }) -} - -// RoundQuotaFunc sets the function that will be used to covert the CPU quota from float to int. 
-func RoundQuotaFunc(rf func(v float64) int) Option { - return optionFunc(func(cfg *config) { - cfg.roundQuotaFunc = rf - }) -} - -type optionFunc func(*config) - -func (of optionFunc) apply(cfg *config) { of(cfg) } - -// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning -// any error encountered and an undo function. -// -// Set is a no-op on non-Linux systems and in Linux environments without a -// configured CPU quota. -func Set(opts ...Option) (func(), error) { - cfg := &config{ - procs: iruntime.CPUQuotaToGOMAXPROCS, - roundQuotaFunc: iruntime.DefaultRoundFunc, - minGOMAXPROCS: 1, - } - for _, o := range opts { - o.apply(cfg) - } - - undoNoop := func() { - cfg.log("maxprocs: No GOMAXPROCS change to reset") - } - - // Honor the GOMAXPROCS environment variable if present. Otherwise, amend - // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is - // Linux, and guarantee a minimum value of 1. The minimum guaranteed value - // can be overridden using `maxprocs.Min()`. - if max, exists := os.LookupEnv(_maxProcsKey); exists { - cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max) - return undoNoop, nil - } - - maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc) - if err != nil { - return undoNoop, err - } - - if status == iruntime.CPUQuotaUndefined { - cfg.log("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", currentMaxProcs()) - return undoNoop, nil - } - - prev := currentMaxProcs() - undo := func() { - cfg.log("maxprocs: Resetting GOMAXPROCS to %v", prev) - runtime.GOMAXPROCS(prev) - } - - switch status { - case iruntime.CPUQuotaMinUsed: - cfg.log("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs) - case iruntime.CPUQuotaUsed: - cfg.log("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs) - } - - runtime.GOMAXPROCS(maxProcs) - return undo, nil -} diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go deleted file mode 100644 index cc7fc5aee1..0000000000 --- a/vendor/go.uber.org/automaxprocs/maxprocs/version.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package maxprocs - -// Version is the current package version. 
-const Version = "1.6.0" diff --git a/vendor/go.uber.org/mock/mockgen/model/model_gotypes.go b/vendor/go.uber.org/mock/mockgen/model/model_gotypes.go new file mode 100644 index 0000000000..4596c3d28d --- /dev/null +++ b/vendor/go.uber.org/mock/mockgen/model/model_gotypes.go @@ -0,0 +1,160 @@ +package model + +import ( + "fmt" + "go/types" +) + +// InterfaceFromGoTypesType returns a pointer to an interface for the +// given interface type loaded from archive. +func InterfaceFromGoTypesType(it *types.Interface) (*Interface, error) { + intf := &Interface{} + + for i := 0; i < it.NumMethods(); i++ { + mt := it.Method(i) + // Skip unexported methods. + if !mt.Exported() { + continue + } + m := &Method{ + Name: mt.Name(), + } + + var err error + m.In, m.Variadic, m.Out, err = funcArgsFromGoTypesType(mt.Type().(*types.Signature)) + if err != nil { + return nil, fmt.Errorf("method %q: %w", mt.Name(), err) + } + + intf.AddMethod(m) + } + + return intf, nil +} + +func funcArgsFromGoTypesType(t *types.Signature) (in []*Parameter, variadic *Parameter, out []*Parameter, err error) { + nin := t.Params().Len() + if t.Variadic() { + nin-- + } + for i := 0; i < nin; i++ { + p, err := parameterFromGoTypesType(t.Params().At(i), false) + if err != nil { + return nil, nil, nil, err + } + in = append(in, p) + } + if t.Variadic() { + p, err := parameterFromGoTypesType(t.Params().At(nin), true) + if err != nil { + return nil, nil, nil, err + } + variadic = p + } + for i := 0; i < t.Results().Len(); i++ { + p, err := parameterFromGoTypesType(t.Results().At(i), false) + if err != nil { + return nil, nil, nil, err + } + out = append(out, p) + } + return +} + +func parameterFromGoTypesType(v *types.Var, variadic bool) (*Parameter, error) { + t := v.Type() + if variadic { + t = t.(*types.Slice).Elem() + } + tt, err := typeFromGoTypesType(t) + if err != nil { + return nil, err + } + return &Parameter{Name: v.Name(), Type: tt}, nil +} + +func typeFromGoTypesType(t types.Type) (Type, error) { + if t, ok := t.(*types.Named); ok { + tn := t.Obj() + if tn.Pkg() == nil { + return PredeclaredType(tn.Name()), nil + } + return &NamedType{ + Package: tn.Pkg().Path(), + Type: tn.Name(), + }, nil + } + + // only unnamed or predeclared types after here + + // Lots of types have element types. Let's do the parsing and error checking for all of them. 
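+	// (For *types.Slice and *types.Pointer, Elem is the element or pointee
+	// type; for *types.Map it is the value type, and for *types.Chan the
+	// element type. Map keys are handled separately in the switch below.)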
+ var elemType Type + if t, ok := t.(interface{ Elem() types.Type }); ok { + var err error + elemType, err = typeFromGoTypesType(t.Elem()) + if err != nil { + return nil, err + } + } + + switch t := t.(type) { + case *types.Array: + return &ArrayType{ + Len: int(t.Len()), + Type: elemType, + }, nil + case *types.Basic: + return PredeclaredType(t.String()), nil + case *types.Chan: + var dir ChanDir + switch t.Dir() { + case types.RecvOnly: + dir = RecvDir + case types.SendOnly: + dir = SendDir + } + return &ChanType{ + Dir: dir, + Type: elemType, + }, nil + case *types.Signature: + in, variadic, out, err := funcArgsFromGoTypesType(t) + if err != nil { + return nil, err + } + return &FuncType{ + In: in, + Out: out, + Variadic: variadic, + }, nil + case *types.Interface: + if t.NumMethods() == 0 { + return PredeclaredType("interface{}"), nil + } + case *types.Map: + kt, err := typeFromGoTypesType(t.Key()) + if err != nil { + return nil, err + } + return &MapType{ + Key: kt, + Value: elemType, + }, nil + case *types.Pointer: + return &PointerType{ + Type: elemType, + }, nil + case *types.Slice: + return &ArrayType{ + Len: -1, + Type: elemType, + }, nil + case *types.Struct: + if t.NumFields() == 0 { + return PredeclaredType("struct{}"), nil + } + // TODO: UnsafePointer + } + + return nil, fmt.Errorf("can't yet turn %v (%T) into a model.Type", t.String(), t) +} diff --git a/vendor/go.yaml.in/yaml/v2/.travis.yml b/vendor/go.yaml.in/yaml/v2/.travis.yml new file mode 100644 index 0000000000..7348c50c0c --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/vendor/go.yaml.in/yaml/v2/LICENSE similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE rename to vendor/go.yaml.in/yaml/v2/LICENSE diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml rename to vendor/go.yaml.in/yaml/v2/LICENSE.libyaml diff --git a/openshift/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/go.yaml.in/yaml/v2/NOTICE similarity index 54% rename from openshift/vendor/sigs.k8s.io/yaml/yaml_go110.go rename to vendor/go.yaml.in/yaml/v2/NOTICE index 94abc1719d..866d74a7ad 100644 --- a/openshift/vendor/sigs.k8s.io/yaml/yaml_go110.go +++ b/vendor/go.yaml.in/yaml/v2/NOTICE @@ -1,10 +1,4 @@ -// This file contains changes that are only compatible with go 1.10 and onwards. - -//go:build go1.10 -// +build go1.10 - -/* -Copyright 2021 The Kubernetes Authors. +Copyright 2011-2016 Canonical Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,15 +11,3 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/ - -package yaml - -import "encoding/json" - -// DisallowUnknownFields configures the JSON decoder to error out if unknown -// fields come along, instead of dropping them by default. 
-func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
-	d.DisallowUnknownFields()
-	return d
-}
diff --git a/vendor/go.yaml.in/yaml/v2/README.md b/vendor/go.yaml.in/yaml/v2/README.md
new file mode 100644
index 0000000000..c9388da425
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/README.md
@@ -0,0 +1,131 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *go.yaml.in/yaml/v2*.
+
+To install it, run:
+
+    go get go.yaml.in/yaml/v2
+
+API documentation
+-----------------
+
+See: <https://pkg.go.dev/go.yaml.in/yaml/v2>
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"go.yaml.in/yaml/v2"
+)
+
+var data = `
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+	A string
+	B struct {
+		RenamedC int   `yaml:"c"`
+		D        []int `yaml:",flow"`
+	}
+}
+
+func main() {
+	t := T{}
+
+	err := yaml.Unmarshal([]byte(data), &t)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- t:\n%v\n\n", t)
+
+	d, err := yaml.Marshal(&t)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+	m := make(map[interface{}]interface{})
+
+	err = yaml.Unmarshal([]byte(data), &m)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- m:\n%v\n\n", m)
+
+	d, err = yaml.Marshal(&m)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/go.yaml.in/yaml/v2/apic.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go rename to vendor/go.yaml.in/yaml/v2/apic.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/go.yaml.in/yaml/v2/decode.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go rename to vendor/go.yaml.in/yaml/v2/decode.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/go.yaml.in/yaml/v2/emitterc.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go rename to vendor/go.yaml.in/yaml/v2/emitterc.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/go.yaml.in/yaml/v2/encode.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go rename to vendor/go.yaml.in/yaml/v2/encode.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/go.yaml.in/yaml/v2/parserc.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go rename to vendor/go.yaml.in/yaml/v2/parserc.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/go.yaml.in/yaml/v2/readerc.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go rename to vendor/go.yaml.in/yaml/v2/readerc.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/go.yaml.in/yaml/v2/resolve.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go rename to vendor/go.yaml.in/yaml/v2/resolve.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/go.yaml.in/yaml/v2/scannerc.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go rename to vendor/go.yaml.in/yaml/v2/scannerc.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/go.yaml.in/yaml/v2/sorter.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go rename to vendor/go.yaml.in/yaml/v2/sorter.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/go.yaml.in/yaml/v2/writerc.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go rename to vendor/go.yaml.in/yaml/v2/writerc.go diff --git a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/vendor/go.yaml.in/yaml/v2/yaml.go similarity index 99% rename from openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go rename to vendor/go.yaml.in/yaml/v2/yaml.go index 30813884c0..5248e1263c 100644 --- a/openshift/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go +++ b/vendor/go.yaml.in/yaml/v2/yaml.go @@ -2,7 +2,7 @@ // // Source code and other details for the project are available at GitHub: // -// https://github.com/go-yaml/yaml +// https://github.com/yaml/go-yaml // package yaml diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/go.yaml.in/yaml/v2/yamlh.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go rename to vendor/go.yaml.in/yaml/v2/yamlh.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go similarity index 100% rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go rename to vendor/go.yaml.in/yaml/v2/yamlprivateh.go diff --git a/vendor/go.yaml.in/yaml/v3/LICENSE b/vendor/go.yaml.in/yaml/v3/LICENSE new file mode 100644 index 0000000000..2683e4bb1f --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. 
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright starting in 2011 when the project was ported over:
+
+    apic.go emitterc.go parserc.go readerc.go scannerc.go
+    writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/hack/tools/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/go.yaml.in/yaml/v3/NOTICE
similarity index 54%
rename from hack/tools/vendor/sigs.k8s.io/yaml/yaml_go110.go
rename to vendor/go.yaml.in/yaml/v3/NOTICE
index 94abc1719d..866d74a7ad 100644
--- a/hack/tools/vendor/sigs.k8s.io/yaml/yaml_go110.go
+++ b/vendor/go.yaml.in/yaml/v3/NOTICE
@@ -1,10 +1,4 @@
-// This file contains changes that are only compatible with go 1.10 and onwards.
-
-//go:build go1.10
-// +build go1.10
-
-/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2011-2016 Canonical Ltd.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -17,15 +11,3 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-*/
-
-package yaml
-
-import "encoding/json"
-
-// DisallowUnknownFields configures the JSON decoder to error out if unknown
-// fields come along, instead of dropping them by default.
-func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
-	d.DisallowUnknownFields()
-	return d
-}
diff --git a/vendor/go.yaml.in/yaml/v3/README.md b/vendor/go.yaml.in/yaml/v3/README.md
new file mode 100644
index 0000000000..15a85a6350
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/README.md
@@ -0,0 +1,171 @@
+go.yaml.in/yaml
+===============
+
+YAML Support for the Go Language
+
+
+## Introduction
+
+The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode
+and decode [YAML](https://yaml.org/) values.
+
+It was originally developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go
+port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to
+parse and generate YAML data quickly and reliably.
+
+
+## Project Status
+
+This project started as a fork of the extremely popular [go-yaml](
+https://github.com/go-yaml/yaml/)
+project, and is being maintained by the official [YAML organization](
+https://github.com/yaml/).
+
+The YAML team took over ongoing maintenance and development of the project after
+discussion with go-yaml's author, @niemeyer, following his decision to
+[label the project repository as "unmaintained"](
+https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025.
+
+We have put together a team of dedicated maintainers including representatives
+of go-yaml's most important downstream projects.
+
+We will strive to earn the trust of the various go-yaml forks to switch back to
+this repository as their upstream.
+
+Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you
+would like to contribute or be involved.
+
+
+## Compatibility
+
+The `yaml` package supports most of YAML 1.2, but preserves some behavior from
+1.1 for backwards compatibility.
+
+Specifically, v3 of the `yaml` package:
+
+* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being
+  decoded into a typed bool value.
+  Otherwise they behave as a string.
+  Booleans in YAML 1.2 are `true`/`false` only.
+* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than
+  `0o777` as specified in YAML 1.2, because most parsers still use the old
+  format.
+  Octals in the `0o777` format are supported though, so new files work.
+* Does not support base-60 floats.
+  These are gone from YAML 1.2, and were actually never supported by this
+  package as it's clearly a poor choice.
+
+
+## Installation and Usage
+
+The import path for the package is *go.yaml.in/yaml/v3*.
+
+To install it, run:
+
+```bash
+go get go.yaml.in/yaml/v3
+```
+
+
+## API Documentation
+
+See: <https://pkg.go.dev/go.yaml.in/yaml/v3>
+
+
+## API Stability
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](
+https://gopkg.in).
+
+
+## Example
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"go.yaml.in/yaml/v3"
+)
+
+var data = `
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + + +## License + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. diff --git a/vendor/go.yaml.in/yaml/v3/apic.go b/vendor/go.yaml.in/yaml/v3/apic.go new file mode 100644 index 0000000000..05fd305da1 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. 
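+// Resetting the struct to its zero value drops the input, buffers, and
+// handler references so they can be garbage collected.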
+func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. 
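+// Negative values are normalized to -1, which the emitter treats as
+// "no preferred width".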
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. 
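+// The boolean result always reports success; it is kept only to mirror the
+// C libyaml signature.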
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/go.yaml.in/yaml/v3/decode.go b/vendor/go.yaml.in/yaml/v3/decode.go new file mode 100644 index 0000000000..02e2b17bfe --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/decode.go @@ -0,0 +1,1018 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
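+// The parser records every anchored node in the anchors map as the tree is
+// built, so alias events encountered later in the same document resolve to
+// the node they reference.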
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *Node
+	anchors  map[string]*Node
+	doneInit bool
+	textless bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.anchors = make(map[string]*Node)
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	// It's a curious choice of the underlying API to generally return a
+	// positive result on success, but in this case to return true in an
+	// error scenario. This was the source of bugs in the past (issue #666).
+	if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+		// Scanner errors don't advance the line before returning the error.
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't advance the line before returning the error.
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+	if anchor != nil {
+		n.Anchor = string(anchor)
+		p.anchors[n.Anchor] = n
+	}
+}
+
+func (p *parser) parse() *Node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+ return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
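+// Nil pointers along the way are allocated as they are traversed, so
+// unmarshalers reachable only through uninitialized pointer fields are
+// still discovered and invoked.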
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
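+		// Plain numeric scalars are rejected for time.Duration targets below;
+		// only strings accepted by time.ParseDuration (e.g. "300ms") decode
+		// into a duration.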
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
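+		// Decode into a fresh []interface{} first; the result is assigned
+		// back to the interface value once all elements are in place.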
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if d.getPossiblyUnhashableKey(mergedFields, ki) { + continue + } + d.setPossiblyUnhashableKey(mergedFields, ki, true) + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := 
settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" 
|| shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/go.yaml.in/yaml/v3/emitterc.go b/vendor/go.yaml.in/yaml/v3/emitterc.go new file mode 100644 index 0000000000..ab4e03ba72 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/emitterc.go @@ -0,0 +1,2054 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
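The hunk above closes out decode.go, whose final pieces implement YAML merge keys: `isMerge` recognizes the `<<` scalar and `d.merge` folds the referenced mapping (or sequence of mappings) into the target, with locally defined keys taking precedence via the `mergedFields` bookkeeping. Before the diff continues through the emitter's buffer helpers, here is a minimal sketch of what that machinery enables, using the package's public `Unmarshal` API under the import path vendored in this diff; output comments reflect yaml.v3's documented merge semantics:

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	// "<<: *defaults" copies keys from the anchored mapping, but a key
	// defined directly on the target mapping (host) is not overwritten.
	src := []byte(`
defaults: &defaults
  adapter: postgres
  host: localhost
development:
  <<: *defaults
  host: db.dev.local
`)
	var out map[string]map[string]string
	if err := yaml.Unmarshal(src, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["development"]["adapter"]) // postgres (merged in)
	fmt.Println(out["development"]["host"])    // db.dev.local (local key wins)
}
```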
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. 
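A detail worth noting before the indentation logic that follows: `yaml_emitter_need_more_events` deliberately buffers one extra event after DOCUMENT-START, two after SEQUENCE-START, and three after MAPPING-START, so later checks such as `yaml_emitter_check_empty_sequence` can peek at the matching END event and pick flow style for empty collections. A small sketch of the visible effect (import path as vendored here; the four-space indent matches the `e.indent = 4` default in encode.go later in this diff):

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	type doc struct {
		Empty []int `yaml:"empty"`
		Full  []int `yaml:"full"`
	}
	out, err := yaml.Marshal(doc{Full: []int{1, 2}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// The empty (nil) slice is emitted in flow style:
	//   empty: []
	//   full:
	//       - 1
	//       - 2
}
```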
+func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent) + if compact_seq { + // The value compact_seq passed in is almost always set to `false` when this function is called, + // except when we are dealing with sequence nodes. So this gets triggered to subtract 2 only when we + // are increasing the indent to account for sequence nodes, which will be correct because we need to + // subtract 2 to account for the - at the beginning of the sequence node. + emitter.indent = emitter.indent - 2 + } + } + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: 
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if 
!yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
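Before the flow-sequence logic below, it may help to see the document start/end handling above from the caller's side: `yaml_emitter_emit_document_start` writes `---` only for non-implicit documents, and only the first document on a stream can be implicit, so reusing one `Encoder` for several values yields `---` separators. A short sketch against the public API (import path as vendored in this diff):

```go
package main

import (
	"bytes"
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	// The first document is implicit (no "---"); the second is not.
	if err := enc.Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}
	if err := enc.Encode(map[string]int{"b": 2}); err != nil {
		panic(err)
	}
	enc.Close()
	fmt.Print(buf.String())
	// a: 1
	// ---
	// b: 2
}
```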
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
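The flow-sequence states above are what render `[a, b, c]`. They are reached from `yaml_emitter_emit_sequence_start` (further down) whenever the event carries flow style, which the struct tag option `,flow` requests through the public API. A minimal sketch before the flow-mapping logic below:

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	type doc struct {
		Ports []int `yaml:"ports,flow"`
	}
	out, err := yaml.Marshal(doc{Ports: []int{80, 443, 8080}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // ports: [80, 443, 8080]
}
```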
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
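Analogously, the flow-mapping states above produce `{key: value}` output, including the simple-key fast path taken when `yaml_emitter_check_simple_key` holds. The same `,flow` tag works for map-typed fields; a short sketch before the block-sequence logic below:

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	type doc struct {
		Labels map[string]string `yaml:"labels,flow"`
	}
	out, err := yaml.Marshal(doc{Labels: map[string]string{"app": "web"}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // labels: {app: web}
}
```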
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// emitter.mapping_context tells us if we are currently in a mapping context.
+		// emitter.column tells us which column of the yaml output we are in; column 0 is the first character of the line.
+		// emitter.indention tells us if the last character written was an indentation character.
+		// emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements.
+		// So `seq` means that we are in a mapping context, we are either at column 0 or
+		// the last character was not an indentation character, and we consider '- ' part of the indentation
+		// for sequence elements.
+		seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
+			emitter.compact_sequence_indent
+		if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if len(emitter.line_comment) > 0 {
+		// [Go] A line comment was provided for the key. That's unusual as the
+		//      scanner associates line comments with the value. Either way,
+		//      save the line comment and render it appropriately later.
+		emitter.key_line_comment = emitter.line_comment
+		emitter.line_comment = nil
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
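The `seq` flag computed above is the heart of this fork's compact-sequence-indent feature: it only changes how block sequences are indented on output, never what the YAML means. Both layouts in the sketch below decode identically, a useful sanity check before the block-value logic that follows (import path as vendored in this diff):

```go
package main

import (
	"fmt"
	"reflect"

	"go.yaml.in/yaml/v3"
)

func main() {
	// Compact style: "- " counts as part of the indentation.
	compact := []byte("list:\n- a\n- b\n")
	// Regular style: items are indented beneath the key.
	indented := []byte("list:\n  - a\n  - b\n")

	var x, y map[string][]string
	if err := yaml.Unmarshal(compact, &x); err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal(indented, &y); err != nil {
		panic(err)
	}
	fmt.Println(reflect.DeepEqual(x, y)) // true
}
```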
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. 
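The `key_line_comment` shuffling above exists so a comment attached to a mapping key still comes out next to the rendered line. Comments reach the emitter through `yaml.Node`'s comment fields; before the scalar emission path below, a hedged sketch of the round trip (node built by hand, tags left empty so the encoder resolves them; the output comment shows the expected placement per the logic above):

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	n := &yaml.Node{
		Kind: yaml.MappingNode,
		Content: []*yaml.Node{
			{Kind: yaml.ScalarNode, Value: "replicas", LineComment: "# scaled by HPA"},
			{Kind: yaml.ScalarNode, Value: "3"},
		},
	}
	out, err := yaml.Marshal(n)
	if err != nil {
		panic(err)
	}
	// The key's line comment is re-attached after the scalar value:
	fmt.Print(string(out)) // replicas: 3 # scaled by HPA
}
```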
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. 
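`yaml_emitter_check_empty_sequence` and `yaml_emitter_check_empty_mapping` above are why empty collections never open a block: the sequence/mapping start handlers route them to the flow states, which immediately see the matching END event. The observable result, sketched before the simple-key check below:

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	out, err := yaml.Marshal(struct {
		M map[string]string `yaml:"m"`
		S []int             `yaml:"s"`
	}{})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// m: {}
	// s: []
}
```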
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. 
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+			return false
+		}
+		emitter.tail_comment = emitter.tail_comment[:0]
+		emitter.foot_indent = emitter.indent
+		if emitter.foot_indent < 0 {
+			emitter.foot_indent = 0
+		}
+	}
+
+	if len(emitter.head_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+		return false
+	}
+	emitter.head_comment = emitter.head_comment[:0]
+	return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool {
+	if len(emitter.line_comment) == 0 {
+		// The next 3 lines are needed to resolve an issue with leading newlines.
+		// See https://github.com/go-yaml/yaml/issues/755
+		// When linebreak is set to true, put_break will be called and will add
+		// the needed newline.
+		if linebreak && !put_break(emitter) {
+			return false
+		}
+		return true
+	}
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+		return false
+	}
+	emitter.line_comment = emitter.line_comment[:0]
+	return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.foot_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+		return false
+	}
+	emitter.foot_comment = emitter.foot_comment[:0]
+	emitter.foot_indent = emitter.indent
+	if emitter.foot_indent < 0 {
+		emitter.foot_indent = 0
+	}
+	return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
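`yaml_emitter_select_scalar_style` above, together with `yaml_emitter_analyze_scalar` further down, decides when a string may stay plain and when it must be quoted. A sketch of typical outcomes; the exact quoting style is an emitter decision, so treat the output comments as expected rather than contractual:

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"a": "123",        // would re-parse as an int, so it gets quoted
		"b": "plain text", // safe as a plain scalar
		"c": " leading",   // leading space disallows plain style
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// a: "123"
	// b: plain text
	// c: ' leading'
}
```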
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
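`yaml_emitter_analyze_scalar` above classifies every byte of a scalar (tabs, line breaks, leading/trailing space, special characters) and records which styles remain allowed; the style selector then downgrades plain to single-quoted to double-quoted as needed. Two common consequences, sketched before the event analysis below (expected output follows the rules above; the literal-block choice for multiline strings is made in encode.go's string handling, which this diff only partially shows):

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"multiline": "line one\nline two\n", // line breaks forbid plain style
		"tabbed":    "a\tb",                 // tabs forbid plain and single-quoted
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// multiline: |
	//     line one
	//     line two
	// tabbed: "a\tb"
}
```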
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if 
!yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/encode.go b/vendor/go.yaml.in/yaml/v3/encode.go new file mode 100644 index 0000000000..de9e72a3e6 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
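For orientation, a minimal, hypothetical usage sketch (not part of the vendored code) of the public entry point that drives the emitter above and the encoder defined in this file. It relies on two behaviors visible in this diff: strings containing newlines are written by the literal block scalar writer, and this encoder defaults to a 4-space indent:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	// A multiline string should come out as a literal block scalar ("|").
	out, err := yaml.Marshal(map[string]string{"script": "echo hello\necho world\n"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected shape, assuming the default 4-space indent:
	// script: |
	//     echo hello
	//     echo world
}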
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + 
sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = e.fieldByIndex(in, info.Inline) + if !value.IsValid() { + continue + } + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +// isOldBool returns whether s is bool notation as defined in YAML 1.1. +// +// We continue to force strings that YAML 1.1 would interpret as booleans to be +// rendered as quoted strings so that the marshalled output is valid for YAML 1.1 +// parsing.
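Both of these YAML 1.1 compatibility guards surface as forced quoting during Marshal. A hypothetical snippet (not part of the vendored file) illustrating the effect:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"flag": "on",   // YAML 1.1 boolean notation
		"time": "1:20", // matches the YAML 1.1 base 60 float pattern
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Both values should be double-quoted so YAML 1.1 parsers
	// still read them back as strings:
	// flag: "on"
	// time: "1:20"
}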
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. + implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. 
+ if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + + // If the tag was not explicitly requested, and dropping it won't change the + // implicit tag of the value, don't include it in the presentation. + var tag = node.Tag + var stag = shortTag(tag) + var forceQuoting bool + if tag != "" && node.Style&TaggedStyle == 0 { + if node.Kind == ScalarNode { + if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { + tag = "" + } else { + rtag, _ := resolve("", node.Value) + if rtag == stag { + tag = "" + } else if stag == strTag { + tag = "" + forceQuoting = true + } + } + } else { + var rtag string + switch node.Kind { + case MappingNode: + rtag = mapTag + case SequenceNode: + rtag = seqTag + } + if rtag == stag { + tag = "" + } + } + } + + switch node.Kind { + case DocumentNode: + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + yaml_document_end_event_initialize(&e.event, true) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case SequenceNode: + style := yaml_BLOCK_SEQUENCE_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case MappingNode: + style := yaml_BLOCK_MAPPING_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) + e.event.tail_comment = []byte(tail) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + + // The tail logic below moves the foot comment of prior keys to the following key, + // since the value for each key may be a nested structure and the foot needs to be + // processed only the entirety of the value is streamed. The last tail is processed + // with the mapping end event. + var tail string + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + foot := k.FootComment + if foot != "" { + kopy := *k + kopy.FootComment = "" + k = &kopy + } + e.node(k, tail) + tail = foot + + v := node.Content[i+1] + e.node(v, "") + } + + yaml_mapping_end_event_initialize(&e.event) + e.event.tail_comment = []byte(tail) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case AliasNode: + yaml_alias_event_initialize(&e.event, []byte(node.Value)) + e.event.head_comment = []byte(node.HeadComment) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case ScalarNode: + value := node.Value + if !utf8.ValidString(value) { + if stag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. 
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/go.yaml.in/yaml/v3/parserc.go b/vendor/go.yaml.in/yaml/v3/parserc.go new file mode 100644 index 0000000000..25fe823637 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/parserc.go @@ -0,0 +1,1274 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
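The dispatcher below is invoked once per event until the stream ends. From the public API, a small hypothetical sketch (not part of the diff) of what a single decode that exercises this state machine looks like:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	var doc struct {
		Spec struct {
			Replicas int `yaml:"replicas"`
		} `yaml:"spec"`
	}
	// Each Unmarshal call drives the parser from
	// yaml_PARSE_STREAM_START_STATE through yaml_PARSE_END_STATE.
	if err := yaml.Unmarshal([]byte("spec:\n  replicas: 3\n"), &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc.Spec.Replicas) // 3
}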
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// * +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + var head_comment []byte + if len(parser.head_comment) > 0 { + // [Go] Scan the header comment backwards, and if an empty line is found, break + // the header so the part before the last empty line goes into the + // document header, while the bottom of it goes into a follow up event. + for i := len(parser.head_comment) - 1; i > 0; i-- { + if parser.head_comment[i] == '\n' { + if i == len(parser.head_comment)-1 { + head_comment = parser.head_comment[:i] + parser.head_comment = parser.head_comment[i+1:] + break + } else if parser.head_comment[i-1] == '\n' { + head_comment = parser.head_comment[:i-1] + parser.head_comment = parser.head_comment[i+1:] + break + } + } + } + } + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + + head_comment: head_comment, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +// *********** +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// +// block_node ::= ALIAS +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// +// flow_node ::= ALIAS +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// +// ************************* +// +// block_content ::= block_collection | flow_collection | SCALAR +// +// ****** +// +// flow_content ::= flow_collection | SCALAR +// +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: 
false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// +// ******************** *********** * ********* +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. 
+// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// +// *** * +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// ***** * +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// - *** * +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - ***** * +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
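The directive processing below accepts only a %YAML 1.1 version directive and rejects anything else. A hypothetical demonstration (not part of the diff) through the public API:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	var v map[string]string

	err := yaml.Unmarshal([]byte("%YAML 1.1\n---\nkey: value\n"), &v)
	fmt.Println(err) // <nil>

	err = yaml.Unmarshal([]byte("%YAML 1.2\n---\nkey: value\n"), &v)
	// Expect a non-nil error mentioning an incompatible YAML document.
	fmt.Println(err != nil) // true
}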
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/readerc.go b/vendor/go.yaml.in/yaml/v3/readerc.go new file mode 100644 index 0000000000..56af245366 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we have enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
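The BOM detection above is self-contained enough to exercise standalone. A hedged sketch (`sniffBOM` is a hypothetical helper, not part of this package):

```go
package main

import "fmt"

// sniffBOM reports the encoding implied by a leading byte order mark and how
// many bytes the parser should skip; no BOM means UTF-8 by default, matching
// yaml_parser_determine_encoding above. Illustration only.
func sniffBOM(b []byte) (encoding string, skip int) {
	switch {
	case len(b) >= 3 && b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF:
		return "utf-8", 3
	case len(b) >= 2 && b[0] == 0xFF && b[1] == 0xFE:
		return "utf-16le", 2
	case len(b) >= 2 && b[0] == 0xFE && b[1] == 0xFF:
		return "utf-16be", 2
	}
	return "utf-8", 0
}

func main() {
	fmt.Println(sniffBOM([]byte{0xFF, 0xFE, 'a', 0x00})) // utf-16le 2
	fmt.Println(sniffBOM([]byte("plain")))               // utf-8 0
}
```

+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.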
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. 
+ if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. 
+ if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/resolve.go b/vendor/go.yaml.in/yaml/v3/resolve.go new file mode 100644 index 0000000000..64ae888057 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
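The surrogate-pair arithmetic used by the reader above follows the RFC 2781 formula quoted in its comments and can be checked with a small standalone program (a sketch; the helper name is hypothetical):

```go
package main

import "fmt"

// decodeSurrogatePair combines a UTF-16 high/low surrogate pair into a rune
// via U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF), mirroring the
// reader's logic above. Illustration only, not part of this package.
func decodeSurrogatePair(w1, w2 uint16) (rune, bool) {
	if w1&0xFC00 != 0xD800 || w2&0xFC00 != 0xDC00 {
		return 0, false // not a valid high/low surrogate pair
	}
	return 0x10000 + rune(w1&0x3FF)<<10 + rune(w2&0x3FF), true
}

func main() {
	r, ok := decodeSurrogatePair(0xD83D, 0xDE00)
	fmt.Printf("%U %v\n", r, ok) // U+1F600 true
}
```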
+ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. 
+ + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. + if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. 
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/go.yaml.in/yaml/v3/scannerc.go b/vendor/go.yaml.in/yaml/v3/scannerc.go
new file mode 100644
index 0000000000..30b1f08920
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/scannerc.go
@@ -0,0 +1,3040 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. They are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1.
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. 
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
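The 1024-character, single-line rule quoted from the 1.2 specification above is easy to state on its own. A hedged sketch of the validity condition (a hypothetical helper mirroring `yaml_simple_key_is_valid`, not part of this package):

```go
package main

import "fmt"

// simpleKeyStillPossible mirrors the rule above: an implicit key's ':' must
// appear on the same line as the key's start and within 1024 characters of
// it. Illustration only; the real check lives in yaml_simple_key_is_valid.
func simpleKeyStillPossible(keyLine, keyIndex, curLine, curIndex int) bool {
	return keyLine == curLine && keyIndex+1024 >= curIndex
}

func main() {
	fmt.Println(simpleKeyStillPossible(3, 100, 3, 900)) // true: same line, in range
	fmt.Println(simpleKeyStillPossible(3, 100, 4, 120)) // false: key crossed a line
	fmt.Println(simpleKeyStillPossible(3, 0, 3, 2000))  // false: ':' too far away
}
```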
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. 
+ start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not necessarily simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token.
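The simple-key machinery that yaml_parser_fetch_value relies on boils down to retroactive queue surgery: the scanner remembers where a candidate key began, and only when ':' arrives does it splice a KEY token back into the queue at that remembered position. A toy model of that splice (illustrative names only, not the library's code):

```go
package main

import "fmt"

// insertAt splices tok into tokens at index i, the way the scanner
// inserts a KEY token at a remembered simple-key position.
func insertAt(tokens []string, i int, tok string) []string {
	tokens = append(tokens, "")
	copy(tokens[i+1:], tokens[i:])
	tokens[i] = tok
	return tokens
}

func main() {
	tokens := []string{"BLOCK-MAPPING-START", "SCALAR(foo)"}
	// ':' seen after "foo": the scalar at index 1 was a simple key,
	// so KEY is inserted retroactively before it.
	tokens = insertAt(tokens, 1, "KEY")
	tokens = append(tokens, "VALUE")
	fmt.Println(tokens) // [BLOCK-MAPPING-START KEY SCALAR(foo) VALUE]
}
```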
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. 
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? 
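The skip loop in yaml_parser_scan_to_next_token above compresses to three rules: spaces always skip, tabs skip only in the flow context or where a simple key is not allowed, and '#' consumes the rest of the line. A rough stand-alone model (hypothetical helper, not the scanner's actual control flow):

```go
package main

import "fmt"

// nextToken returns the offset of the next token on a single line.
// tabsOK stands in for (flow context || simple key not allowed).
func nextToken(line string, tabsOK bool) int {
	for i := 0; i < len(line); i++ {
		switch {
		case line[i] == ' ': // spaces always skip
		case line[i] == '\t' && tabsOK: // tabs skip only where allowed
		case line[i] == '#':
			return len(line) // a comment runs to the end of the line
		default:
			return i
		}
	}
	return len(line)
}

func main() {
	fmt.Println(nextToken("   # only a comment", true)) // 19
	fmt.Println(nextToken("  value", true))             // 2
}
```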
+ if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for a blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
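The digit loop in yaml_parser_scan_version_directive_number above caps each version component at two digits and accumulates it as value = value*10 + digit. The same logic as a stand-alone sketch (hypothetical helper, same error strings for recognizability):

```go
package main

import (
	"errors"
	"fmt"
)

// parseVersionNumber consumes leading digits from s, mirroring the
// loop above: at most two digits, accumulated decimally into an int8.
// It returns the value and how many bytes were consumed.
func parseVersionNumber(s string) (int8, int, error) {
	var value, length int8
	i := 0
	for i < len(s) && s[i] >= '0' && s[i] <= '9' {
		length++
		if length > 2 { // max_number_length
			return 0, i, errors.New("found extremely long version number")
		}
		value = value*10 + int8(s[i]-'0')
		i++
	}
	if length == 0 {
		return 0, i, errors.New("did not find expected version number")
	}
	return value, i, nil
}

func main() {
	major, n, err := parseVersionNumber("1.2")
	fmt.Println(major, n, err) // 1 1 <nil>
}
```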
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
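The escape decoder in yaml_parser_scan_uri_escapes reads one %xx octet at a time: the leading octet determines the UTF-8 sequence width, and every continuation octet must match the 10xxxxxx bit pattern. A self-contained sketch of the same checks (illustrative helper, not the library's code):

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

// decodeOneEscapedRune decodes the '%'-escapes for a single UTF-8
// character: the leading octet fixes the sequence width, and each
// following octet must be a continuation byte.
func decodeOneEscapedRune(s string) ([]byte, error) {
	var out []byte
	width := 0
	for {
		if len(s) < 3 || s[0] != '%' {
			return nil, errors.New("did not find URI escaped octet")
		}
		n, err := strconv.ParseUint(s[1:3], 16, 8)
		if err != nil {
			return nil, errors.New("did not find URI escaped octet")
		}
		octet := byte(n)
		if len(out) == 0 {
			// Leading octet: derive the sequence width from the high bits.
			switch {
			case octet&0x80 == 0x00:
				width = 1
			case octet&0xE0 == 0xC0:
				width = 2
			case octet&0xF0 == 0xE0:
				width = 3
			case octet&0xF8 == 0xF0:
				width = 4
			default:
				return nil, errors.New("found an incorrect leading UTF-8 octet")
			}
		} else if octet&0xC0 != 0x80 {
			return nil, errors.New("found an incorrect trailing UTF-8 octet")
		}
		out = append(out, octet)
		s = s[3:]
		if len(out) == width {
			return out, nil
		}
	}
}

func main() {
	b, err := decodeOneEscapedRune("%C3%A9")
	fmt.Println(string(b), err) // é <nil>
}
```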
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. 
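The '+', '-', and digit indicators parsed above control chomping and explicit indentation for block scalars. Assuming the vendored package is importable under its module path go.yaml.in/yaml/v3, the three chomping modes can be observed directly:

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	// Clip (default), strip ('-') and keep ('+') chomping of the
	// trailing line breaks of a literal block scalar.
	for _, doc := range []string{
		"s: |\n  text\n\n",
		"s: |-\n  text\n\n",
		"s: |+\n  text\n\n",
	} {
		var v struct{ S string }
		if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", v.S)
	}
	// Output:
	// "text\n"
	// "text"
	// "text\n\n"
}
```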
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. 
+ leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // It is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexadecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character.
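The four width branches that write the escaped code point above are plain UTF-8 encoding, so they can be checked against the standard library (stand-alone sketch):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// encode reproduces the four width branches above for a scanned
// escape value: 1, 2, 3 or 4 bytes depending on the code point.
func encode(value int) []byte {
	var s []byte
	switch {
	case value <= 0x7F:
		s = append(s, byte(value))
	case value <= 0x7FF:
		s = append(s, byte(0xC0+(value>>6)), byte(0x80+(value&0x3F)))
	case value <= 0xFFFF:
		s = append(s, byte(0xE0+(value>>12)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	default:
		s = append(s, byte(0xF0+(value>>18)), byte(0x80+((value>>12)&0x3F)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	}
	return s
}

func main() {
	// One value per branch; all must agree with utf8.EncodeRune.
	for _, v := range []int{0x41, 0xE9, 0x2028, 0x1F600} {
		buf := make([]byte, 4)
		n := utf8.EncodeRune(buf, rune(v))
		fmt.Println(string(encode(v)) == string(buf[:n])) // true
	}
}
```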
+ s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. 
+ if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
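The indicator checks above are why plain scalars behave differently inside flow collections: ',', '?', '[', ']', '{' and '}' terminate them there but not in block context. A small demonstration, assuming the vendored go.yaml.in/yaml/v3 path is importable:

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	// In flow context the ',' indicator ends a plain scalar, which is
	// why "x y" below parses as a single string containing a space.
	var v map[string][]string
	if err := yaml.Unmarshal([]byte("a: [x y, z]"), &v); err != nil {
		panic(err)
	}
	fmt.Println(v) // map[a:[x y z]]
}
```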
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line - parser.newlines + 1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/sorter.go b/vendor/go.yaml.in/yaml/v3/sorter.go new file mode 100644 index 0000000000..9210ece7e9 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
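The Less method above is what gives encoded mappings their "natural" key order: when two keys first differ inside a digit run, the runs compare numerically rather than lexically. Assuming the vendored import path, the effect is visible when marshalling a map:

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	// Map keys are sorted with keyList.Less, so item2 precedes item10
	// even though "10" < "2" lexically.
	out, err := yaml.Marshal(map[string]int{"item10": 3, "item2": 1, "item1": 0})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// item1: 0
	// item2: 1
	// item10: 3
}
```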
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/go.yaml.in/yaml/v3/writerc.go b/vendor/go.yaml.in/yaml/v3/writerc.go new file mode 100644 index 0000000000..266d0b092c --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/yaml.go b/vendor/go.yaml.in/yaml/v3/yaml.go new file mode 100644 index 0000000000..0b101cd20d --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yaml.go @@ -0,0 +1,703 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/yaml/go-yaml +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" + "unicode/utf8" +) + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. +type Unmarshaler interface { + UnmarshalYAML(value *Node) error +} + +type obsoleteUnmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + parser *parser + knownFields bool +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// KnownFields ensures that the keys in decoded mappings +// exist as fields in the struct being decoded into. +func (dec *Decoder) KnownFields(enable bool) { + dec.knownFields = enable +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value.
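A short usage sketch of the API documented above, assuming the vendored import path: Unmarshal ignores unknown mapping keys by default, while a Decoder with KnownFields(true) reports them as a TypeError:

```go
package main

import (
	"bytes"
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	type T struct {
		F int `yaml:"a,omitempty"`
		B int
	}

	// Plain Unmarshal tolerates the unknown key "x"...
	var t T
	fmt.Println(yaml.Unmarshal([]byte("a: 1\nb: 2\nx: 3"), &t), t) // <nil> {1 2}

	// ...while a Decoder with KnownFields(true) rejects it.
	dec := yaml.NewDecoder(bytes.NewReader([]byte("a: 1\nx: 3")))
	dec.KnownFields(true)
	err := dec.Decode(&t)
	fmt.Println(err != nil) // true
}
```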
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//	`(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//	omitempty    Only include the field if it's not set to the zero
+//	             value for the type or to empty slices or maps.
+//	             Zero valued structs will be omitted if all their public
+//	             fields are zero, unless they implement an IsZero
+//	             method (see the IsZeroer interface type), in which
+//	             case the field will be excluded if IsZero returns true.
+//
+//	flow         Marshal using a flow style (useful for structs,
+//	             sequences and maps).
+//
+//	inline       Inline the field, which must be a struct or a map,
+//	             causing all of its fields or keys to be processed as if
+//	             they were part of the outer struct. For maps, keys must
+//	             not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//	yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
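+//
+// As an illustrative sketch (not part of the upstream docs), encoding
+// two documents to one stream:
+//
+//	enc := yaml.NewEncoder(w)
+//	_ = enc.Encode(map[string]int{"a": 1})
+//	_ = enc.Encode(map[string]int{"b": 2}) // preceded by a "---" separator
+//	_ = enc.Close()                        // flushes buffered output to w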
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(v))
+	e.finish()
+	p := newParser(e.out)
+	p.textless = true
+	defer p.destroy()
+	doc := p.parse()
+	*n = *doc.Content[0]
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
+func (e *Encoder) CompactSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = true
+}
+
+// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
+func (e *Encoder) DefaultSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = false
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
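+//
+// As a small sketch, the keys and values of a decoded mapping node are
+// stored as alternating entries of Content:
+//
+//	for i := 0; i < len(node.Content); i += 2 {
+//		key, value := node.Content[i], node.Content[i+1]
+//		fmt.Println(key.Value, value.Value)
+//	}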
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//	var person struct {
+//		Name    string
+//		Address yaml.Node
+//	}
+//	err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//	var person Node
+//	err := yaml.Unmarshal(data, &person)
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line where the node is.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		case 0:
+			// Special case to make the zero value convenient.
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + 
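+					// Inlined values implementing Unmarshaler are handled by the
+					// decoder itself, so only the field's index path is recorded
+					// here rather than flattening its fields.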
inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/go.yaml.in/yaml/v3/yamlh.go b/vendor/go.yaml.in/yaml/v3/yamlh.go new file mode 100644 index 0000000000..f59aa40f64 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yamlh.go @@ -0,0 +1,811 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. 
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT // A STREAM-START event.
+	yaml_STREAM_END_EVENT   // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// +// yaml_parser_set_input(). +// +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. 
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
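+
+	// As a rough illustration, parsing "a: 1" walks the states
+	// STREAM_START -> IMPLICIT_DOCUMENT_START -> BLOCK_NODE ->
+	// BLOCK_MAPPING_FIRST_KEY -> BLOCK_MAPPING_VALUE -> DOCUMENT_END -> END.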
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds alias data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_reader io.Reader // File input data.
+	input        []byte    // String input data.
+	input_pos    int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	newlines int // The number of line breaks since last non-break/non-blank character
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+ + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// +// yaml_emitter_set_output(). +// +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+	yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE   // Expect the next item of a flow sequence, with the comma already written out
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE     // Expect the next key of a flow mapping, with the comma already written out
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_writer io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // Is the output in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	compact_sequence_indent bool // Is '- ' considered part of the indentation for sequence elements?
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // Was the last character a whitespace?
+	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+	open_ended bool // Is an explicit document end required?
+
+	space_above bool // Is there an empty line above?
+	foot_indent int  // The indent used to write the foot comment above, or -1 if none.
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Comments
+	head_comment []byte
+	line_comment []byte
+	foot_comment []byte
+	tail_comment []byte
+
+	key_line_comment []byte
+
+	// Dumper stuff
+
+	opened bool // Was the stream already opened?
+	closed bool // Was the stream already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // Has the node been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/go.yaml.in/yaml/v3/yamlprivateh.go b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
new file mode 100644
index 0000000000..dea1ba9610
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. 
+func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 0000000000..d25979d9f5 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,825 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. 
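+			// For example, -1 encodes as 0xff and -256 as 0xff 0x00:
+			// inverting the bytes of (-n - 1) yields the two's-complement
+			// form directly.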
+ nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. + if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. +func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. 
+// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octets not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean +// representation into out and advances. It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer, to a big.Int, or to a []byte it panics. Only +// positive and zero values can be decoded into []byte, and they are returned as +// big-endian binary values that share memory with s. Positive values will have +// no leading zeroes, and zero will be returned as a single zero byte. +// ReadASN1Integer reports whether the read was successful. +func (s *String) ReadASN1Integer(out interface{}) bool { + switch out := out.(type) { + case *int, *int8, *int16, *int32, *int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case *uint, *uint8, *uint16, *uint32, *uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case *big.Int: + return s.readASN1BigInt(out) + case *[]byte: + return s.readASN1Bytes(out) + default: + panic("out does not point to an integer type") + } +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. 
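
For illustration only (not part of the patch): checkASN1Integer above is what makes ReadASN1Integer reject non-minimal INTEGER encodings, per DER. A small sketch:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        // 02 02 ff 7f is INTEGER -129 in minimal two's complement.
        var v int64
        s := cryptobyte.String{0x02, 0x02, 0xff, 0x7f}
        fmt.Println(s.ReadASN1Integer(&v), v) // true -129

        // 02 02 00 7f is rejected: the leading 0x00 is unnecessary because
        // the next byte's high bit is already clear.
        var w int64
        s = cryptobyte.String{0x02, 0x02, 0x00, 0x7f}
        fmt.Println(s.ReadASN1Integer(&w)) // false
    }
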
+ neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Bytes(out *[]byte) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + return false + } + for len(bytes) > 1 && bytes[0] == 0 { + bytes = bytes[1:] + } + *out = bytes + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. +func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 5 { + return false + } + // Avoid overflowing int on a 32-bit platform. + // We don't want different behavior based on the architecture. + if ret >= 1<<(31-7) { + return false + } + ret <<= 7 + b := s.read(1)[0] + + // ITU-T X.690, section 8.19.2: + // The subidentifier shall be encoded in the fewest possible octets, + // that is, the leading octet of the subidentifier shall not have the value 0x80. + if i == 0 && b == 0x80 { + return false + } + + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. 
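
A sketch (not part of the patch) of the base-128 varints read by readBase128Int and the 40*value1 + value2 packing of the first two OID components, round-tripped through the vendored package:

    package main

    import (
        encoding_asn1 "encoding/asn1"
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        var b cryptobyte.Builder
        // The first two components pack into one byte: 40*1 + 2 = 0x2a.
        // 840 and 113549 become the varints 86 48 and 86 f7 0d.
        b.AddASN1ObjectIdentifier(encoding_asn1.ObjectIdentifier{1, 2, 840, 113549})
        der, _ := b.Bytes()
        fmt.Printf("%% x\n", der) // 06 06 2a 86 48 86 f7 0d

        var oid encoding_asn1.ObjectIdentifier
        s := cryptobyte.String(der)
        fmt.Println(s.ReadASN1ObjectIdentifier(&oid), oid) // true 1.2.840.113549
    }
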
+ components := make([]int, len(bytes)+1)
+
+ // The first varint is 40*value1 + value2:
+ // According to this packing, value1 can take the values 0, 1 and 2 only.
+ // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
+ // then there are no restrictions on value2.
+ var v int
+ if !bytes.readBase128Int(&v) {
+ return false
+ }
+ if v < 80 {
+ components[0] = v / 40
+ components[1] = v % 40
+ } else {
+ components[0] = 2
+ components[1] = v - 80
+ }
+
+ i := 2
+ for ; len(bytes) > 0; i++ {
+ if !bytes.readBase128Int(&v) {
+ return false
+ }
+ components[i] = v
+ }
+ *out = components[:i]
+ return true
+}
+
+// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and
+// advances. It reports whether the read was successful.
+func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.GeneralizedTime) {
+ return false
+ }
+ t := string(bytes)
+ res, err := time.Parse(generalizedTimeFormatStr, t)
+ if err != nil {
+ return false
+ }
+ if serialized := res.Format(generalizedTimeFormatStr); serialized != t {
+ return false
+ }
+ *out = res
+ return true
+}
+
+const defaultUTCTimeFormatStr = "060102150405Z0700"
+
+// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances.
+// It reports whether the read was successful.
+func (s *String) ReadASN1UTCTime(out *time.Time) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.UTCTime) {
+ return false
+ }
+ t := string(bytes)
+
+ formatStr := defaultUTCTimeFormatStr
+ var err error
+ res, err := time.Parse(formatStr, t)
+ if err != nil {
+ // Fallback to minute precision if we can't parse second
+ // precision. If we are following X.509 or X.690 we shouldn't
+ // support this, but we do.
+ formatStr = "0601021504Z0700"
+ res, err = time.Parse(formatStr, t)
+ }
+ if err != nil {
+ return false
+ }
+
+ if serialized := res.Format(formatStr); serialized != t {
+ return false
+ }
+
+ if res.Year() >= 2050 {
+ // UTCTime interprets the low order digits 50-99 as 1950-99.
+ // This only applies to its use in the X.509 profile.
+ // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+ res = res.AddDate(-100, 0, 0)
+ }
+ *out = res
+ return true
+}
+
+// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
+// It reports whether the read was successful.
+func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
+ len(bytes)*8/8 != len(bytes) {
+ return false
+ }
+
+ paddingBits := bytes[0]
+ bytes = bytes[1:]
+ if paddingBits > 7 ||
+ len(bytes) == 0 && paddingBits != 0 ||
+ len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 {
+ return false
+ }
+ out.BitLength = len(bytes)*8 - int(paddingBits)
+ out.Bytes = bytes
+ return true
+}
+
+// ReadASN1 reads the contents of a DER-encoded ASN.1 element (not including
+// tag and length bytes) into out, and advances. The element must match the
+// given tag. It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadASN1(out *String, tag asn1.Tag) bool {
+ var t asn1.Tag
+ if !s.ReadAnyASN1(out, &t) || t != tag {
+ return false
+ }
+ return true
+}
+
+// ReadAnyASN1 reads the contents of a DER-encoded ASN.1 element (not including
+// tag and length bytes) into out, sets outTag to its tag, and advances.
+// It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadAnyASN1(out *String, outTag *asn1.Tag) bool {
+ return s.readASN1(out, outTag, true /* skip header */)
+}
+
+func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {
+ if len(*s) < 2 {
+ return false
+ }
+ tag, lenByte := (*s)[0], (*s)[1]
+
+ if tag&0x1f == 0x1f {
+ // ITU-T X.690 section 8.1.2
+ //
+ // An identifier octet with a tag part of 0x1f indicates a high-tag-number
+ // form identifier with two or more octets. We only support tags less than
+ // 31 (i.e. low-tag-number form, single octet identifier).
+ return false
+ }
+
+ if outTag != nil {
+ *outTag = asn1.Tag(tag)
+ }
+
+ // ITU-T X.690 section 8.1.3
+ //
+ // Bit 8 of the first length byte indicates whether the length is short- or
+ // long-form.
+ var length, headerLen uint32 // length includes headerLen
+ if lenByte&0x80 == 0 {
+ // Short-form length (section 8.1.3.4), encoded in bits 1-7.
+ length = uint32(lenByte) + 2
+ headerLen = 2
+ } else {
+ // Long-form length (section 8.1.3.5). Bits 1-7 encode the number of octets
+ // used to encode the length.
+ lenLen := lenByte & 0x7f
+ var len32 uint32
+
+ if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) {
+ return false
+ }
+
+ lenBytes := String((*s)[2 : 2+lenLen])
+ if !lenBytes.readUnsigned(&len32, int(lenLen)) {
+ return false
+ }
+
+ // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
+ // with the minimum number of octets.
+ if len32 < 128 {
+ // Length should have used short-form encoding.
+ return false
+ }
+ if len32>>((lenLen-1)*8) == 0 {
+ // Leading octet is 0. Length should have been at least one byte shorter.
+ return false
+ }
+
+ headerLen = 2 + uint32(lenLen)
+ if headerLen+len32 < len32 {
+ // Overflow.
+ return false + } + length = headerLen + len32 + } + + if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go new file mode 100644 index 0000000000..90ef6a241d --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. +func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 0000000000..cf254f5f1e --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,350 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. 
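
A sketch (not part of the patch) tying the two halves together: the Builder's deferred ASN.1 length fix-up in flushChild, and readASN1's insistence on minimal DER lengths. The byte values in the comments are what the vendored code produces:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
        "golang.org/x/crypto/cryptobyte/asn1"
    )

    func main() {
        // Build SEQUENCE { INTEGER 7 }. AddASN1 reserves one length byte and
        // flushChild later back-fills it (shifting the body if the length
        // turns out to need long form).
        var b cryptobyte.Builder
        b.AddASN1(asn1.SEQUENCE, func(seq *cryptobyte.Builder) {
            seq.AddASN1Int64(7)
        })
        der, err := b.Bytes()
        fmt.Printf("%% x %v\n", der, err) // 30 03 02 01 07 <nil>

        // readASN1 enforces minimal DER lengths: 0x81 0x03 spells a long-form
        // length of 3, which must have used the short form, so parsing fails.
        var inner cryptobyte.String
        bad := cryptobyte.String{0x30, 0x81, 0x03, 0x02, 0x01, 0x07}
        fmt.Println(bad.ReadASN1(&inner, asn1.SEQUENCE)) // false
    }
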
+func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint48 appends a big-endian, 48-bit value to the byte string. +func (b *Builder) AddUint48(v uint64) { + b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint64 appends a big-endian, 64-bit value to the byte string. +func (b *Builder) AddUint64(v uint64) { + b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. 
If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. +func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) + + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) 
+ childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back non-negative n bytes written directly to the Builder. +// An attempt by a child builder passed to a continuation to unwrite bytes +// from its parent will panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n < 0 { + panic("cryptobyte: attempted to unwrite negative number of bytes") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 0000000000..4b0f8097f9 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,183 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It providers helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. +// +// See the documentation and examples for the Builder and String types to get +// started. +package cryptobyte + +// String represents a string of bytes. 
It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. +func (s *String) read(n int) []byte { + if len(*s) < n || n < 0 { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n byte and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint48(out *uint64) bool { + v := s.read(6) + if v == nil { + return false + } + *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5]) + return true +} + +// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint64(out *uint64) bool { + v := s.read(8) + if v == nil { + return false + } + *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. 
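
For illustration only (not part of the patch), a minimal use of the length-prefixed readers above, assuming a one-byte prefix:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        // One byte of length (3), the payload "abc", then a trailing byte
        // that stays in s after the read.
        s := cryptobyte.String{3, 'a', 'b', 'c', 0xff}

        var body cryptobyte.String
        if s.ReadUint8LengthPrefixed(&body) {
            fmt.Printf("%s, %d byte(s) left\n", body, len(s)) // abc, 1 byte(s) left
        }
    }
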
+func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index a3dc629c62..139fa31e1b 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -233,7 +233,11 @@ func parseCert(in []byte, privAlgo string) (*Certificate, error) { if err != nil { return nil, err } - + // The Type() function is intended to return only certificate key types, but + // we use certKeyAlgoNames anyway for safety, to match [Certificate.Type]. + if _, ok := certKeyAlgoNames[k.Type()]; ok { + return nil, fmt.Errorf("ssh: the signature key type %q is invalid for certificates", k.Type()) + } c.SignatureKey = k c.Signature, rest, ok = parseSignatureBody(g.Signature) if !ok || len(rest) > 0 { @@ -301,16 +305,13 @@ type CertChecker struct { SupportedCriticalOptions []string // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. This must be set - // if this CertChecker will be checking user certificates. + // authority for user certificate. This must be set if this CertChecker + // will be checking user certificates. IsUserAuthority func(auth PublicKey) bool // IsHostAuthority should report whether the key is recognized as - // an authority for this host. This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. + // an authority for this host. This must be set if this CertChecker + // will be checking host certificates. IsHostAuthority func(auth PublicKey, address string) bool // Clock is used for verifying time stamps. If nil, time.Now @@ -447,12 +448,19 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { // SignCert signs the certificate with an authority, setting the Nonce, // SignatureKey, and Signature fields. If the authority implements the // MultiAlgorithmSigner interface the first algorithm in the list is used. This -// is useful if you want to sign with a specific algorithm. +// is useful if you want to sign with a specific algorithm. As specified in +// [SSH-CERTS], Section 2.1.1, authority can't be a [Certificate]. 
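
A sketch (not part of the patch) of signing a user certificate with a plain, non-certificate CA key, which is the only shape the new SignCert check permits. The key generation and names here are ad hoc for the example:

    package main

    import (
        "crypto/ed25519"
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        _, caKey, _ := ed25519.GenerateKey(rand.Reader)
        ca, _ := ssh.NewSignerFromKey(caKey)

        userPub, _, _ := ed25519.GenerateKey(rand.Reader)
        pub, _ := ssh.NewPublicKey(userPub)

        cert := &ssh.Certificate{
            Key:             pub,
            CertType:        ssh.UserCert,
            KeyId:           "alice",
            ValidPrincipals: []string{"alice"},
            ValidBefore:     ssh.CertTimeInfinity,
        }
        // ca wraps a plain ed25519 key, so this succeeds; if ca itself were
        // backed by a certificate, SignCert would now reject it.
        fmt.Println(cert.SignCert(rand.Reader, ca)) // <nil>
    }
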
func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { c.Nonce = make([]byte, 32) if _, err := io.ReadFull(rand, c.Nonce); err != nil { return err } + // The Type() function is intended to return only certificate key types, but + // we use certKeyAlgoNames anyway for safety, to match [Certificate.Type]. + if _, ok := certKeyAlgoNames[authority.PublicKey().Type()]; ok { + return fmt.Errorf("ssh: certificates cannot be used as authority (public key type %q)", + authority.PublicKey().Type()) + } c.SignatureKey = authority.PublicKey() if v, ok := authority.(MultiAlgorithmSigner); ok { diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index b86dde151d..c12818fdc5 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -289,7 +289,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA } } - algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) + algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos, true) if err != nil { // If there is no overlap, return the fallback algorithm to support // servers that fail to list all supported algorithms. diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index 0415d33968..f2ec0896c2 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -336,7 +336,7 @@ func parseError(tag uint8) error { return fmt.Errorf("ssh: parse error in message type %d", tag) } -func findCommon(what string, client []string, server []string) (common string, err error) { +func findCommon(what string, client []string, server []string, isClient bool) (string, error) { for _, c := range client { for _, s := range server { if c == s { @@ -344,7 +344,32 @@ func findCommon(what string, client []string, server []string) (common string, e } } } - return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) + err := &AlgorithmNegotiationError{ + What: what, + } + if isClient { + err.SupportedAlgorithms = client + err.RequestedAlgorithms = server + } else { + err.SupportedAlgorithms = server + err.RequestedAlgorithms = client + } + return "", err +} + +// AlgorithmNegotiationError defines the error returned if the client and the +// server cannot agree on an algorithm for key exchange, host key, cipher, MAC. +type AlgorithmNegotiationError struct { + What string + // RequestedAlgorithms lists the algorithms supported by the peer. + RequestedAlgorithms []string + // SupportedAlgorithms lists the algorithms supported on our side. 
+ SupportedAlgorithms []string +} + +func (a *AlgorithmNegotiationError) Error() string { + return fmt.Sprintf("ssh: no common algorithm for %s; we offered: %v, peer offered: %v", + a.What, a.SupportedAlgorithms, a.RequestedAlgorithms) } // DirectionAlgorithms defines the algorithms negotiated in one direction @@ -379,12 +404,12 @@ var aeadCiphers = map[string]bool{ func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *NegotiatedAlgorithms, err error) { result := &NegotiatedAlgorithms{} - result.KeyExchange, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) + result.KeyExchange, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos, isClient) if err != nil { return } - result.HostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) + result.HostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos, isClient) if err != nil { return } @@ -394,36 +419,36 @@ func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMs ctos, stoc = stoc, ctos } - ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) + ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer, isClient) if err != nil { return } - stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) + stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient, isClient) if err != nil { return } if !aeadCiphers[ctos.Cipher] { - ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) + ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer, isClient) if err != nil { return } } if !aeadCiphers[stoc.Cipher] { - stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) + stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient, isClient) if err != nil { return } } - ctos.compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) + ctos.compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer, isClient) if err != nil { return } - stoc.compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) + stoc.compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient, isClient) if err != nil { return } diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go index f5d352fe3a..04ccce3461 100644 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -16,6 +16,7 @@ References: [PROTOCOL]: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=HEAD [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 + 
[SSH-CERTS]: https://datatracker.ietf.org/doc/html/draft-miller-ssh-cert-01 This package does not fall under the stability promise of the Go language itself, so its API may be changed when pressing needs arise. diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index 566e09d5a1..a28c0de503 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -273,7 +273,7 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str return nil, "", nil, nil, errors.New("ssh: no key found") } -// ParsePublicKey parses an SSH public key formatted for use in +// ParsePublicKey parses an SSH public key or certificate formatted for use in // the SSH wire protocol according to RFC 4253, section 6.6. func ParsePublicKey(in []byte) (out PublicKey, err error) { algo, in, ok := parseString(in) diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 97bd8b06f7..db3264da8c 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -39,7 +39,7 @@ const ( FrameContinuation FrameType = 0x9 ) -var frameName = map[FrameType]string{ +var frameNames = [...]string{ FrameData: "DATA", FrameHeaders: "HEADERS", FramePriority: "PRIORITY", @@ -53,10 +53,10 @@ var frameName = map[FrameType]string{ } func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s + if int(t) < len(frameNames) { + return frameNames[t] } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t) } // Flags is a bitmask of HTTP/2 flags. @@ -124,7 +124,7 @@ var flagName = map[FrameType]map[Flags]string{ // might be 0). type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) -var frameParsers = map[FrameType]frameParser{ +var frameParsers = [...]frameParser{ FrameData: parseDataFrame, FrameHeaders: parseHeadersFrame, FramePriority: parsePriorityFrame, @@ -138,8 +138,8 @@ var frameParsers = map[FrameType]frameParser{ } func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f + if int(t) < len(frameParsers) { + return frameParsers[t] } return parseUnknownFrame } @@ -509,7 +509,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { } if fh.Length > fr.maxReadSize { if fh == invalidHTTP1LookingFrameHeader() { - return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) } return nil, ErrFrameTooLarge } diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6c18ea230b..ea5ae629fd 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -11,8 +11,6 @@ // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. 
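
Referring back to the AlgorithmNegotiationError type added in ssh/common.go above (not part of the patch): callers can now distinguish negotiation failures with errors.As, assuming the error reaches them through a chain errors.As can walk. ssh.example.com is a placeholder host:

    package main

    import (
        "errors"
        "fmt"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        cfg := &ssh.ClientConfig{
            User:            "alice",
            HostKeyCallback: ssh.InsecureIgnoreHostKey(), // demo only
        }
        // The dial is expected to fail in this sketch.
        _, err := ssh.Dial("tcp", "ssh.example.com:22", cfg)

        var negoErr *ssh.AlgorithmNegotiationError
        if errors.As(err, &negoErr) {
            fmt.Printf("no common %s: we offered %v, peer offered %v\n",
                negoErr.What, negoErr.SupportedAlgorithms, negoErr.RequestedAlgorithms)
        }
    }
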
package http2 // import "golang.org/x/net/http2" import ( diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go index c646a6952e..3aaffdd1f7 100644 --- a/vendor/golang.org/x/net/trace/events.go +++ b/vendor/golang.org/x/net/trace/events.go @@ -508,7 +508,7 @@ const eventsHTML = ` {{$el.When}} {{$el.ElapsedTime}} - {{$el.Title}} + {{$el.Title}} {{if $.Expanded}} diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go index 03265e888a..8c7c475f2d 100644 --- a/vendor/golang.org/x/oauth2/internal/doc.go +++ b/vendor/golang.org/x/oauth2/internal/doc.go @@ -2,5 +2,5 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. +// Package internal contains support packages for [golang.org/x/oauth2]. package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index 14989beaf4..71ea6ad1f5 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -13,7 +13,7 @@ import ( ) // ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a +// to an [*rsa.PrivateKey]. It detects whether the private key is in a // PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index e83ddeef0f..8389f24629 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "mime" "net/http" @@ -26,9 +25,9 @@ import ( // the requests to access protected resources on the OAuth 2.0 // provider's backend. // -// This type is a mirror of oauth2.Token and exists to break +// This type is a mirror of [golang.org/x/oauth2.Token] and exists to break // an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. +// should convert this Token into an [golang.org/x/oauth2.Token] before use. type Token struct { // AccessToken is the token that authorizes and authenticates // the requests. @@ -50,9 +49,16 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // Raw optionally contains extra metadata from the server // when updating a token. - Raw interface{} + Raw any } // tokenJSON is the struct representing the HTTP response from OAuth2 @@ -99,14 +105,6 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { return nil } -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. 
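
For illustration only (not part of the patch): with the frame.go change above, FrameType.String indexes the frameNames array instead of a map, and out-of-range values fall through to the UNKNOWN format:

    package main

    import (
        "fmt"

        "golang.org/x/net/http2"
    )

    func main() {
        fmt.Println(http2.FrameData)       // DATA
        fmt.Println(http2.FrameType(0x9))  // CONTINUATION
        fmt.Println(http2.FrameType(0x42)) // UNKNOWN_FRAME_TYPE_66
    }
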
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - // AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. type AuthStyle int @@ -143,6 +141,11 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { return c } +type authStyleCacheKey struct { + url string + clientID string +} + // AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that @@ -150,26 +153,26 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { // small. type AuthStyleCache struct { mu sync.Mutex - m map[string]AuthStyle // keyed by tokenURL + m map[authStyleCacheKey]AuthStyle } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. -func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { +func (c *AuthStyleCache) lookupAuthStyle(tokenURL, clientID string) (style AuthStyle, ok bool) { c.mu.Lock() defer c.mu.Unlock() - style, ok = c.m[tokenURL] + style, ok = c.m[authStyleCacheKey{tokenURL, clientID}] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { +func (c *AuthStyleCache) setAuthStyle(tokenURL, clientID string, v AuthStyle) { c.mu.Lock() defer c.mu.Unlock() if c.m == nil { - c.m = make(map[string]AuthStyle) + c.m = make(map[authStyleCacheKey]AuthStyle) } - c.m[tokenURL] = v + c.m[authStyleCacheKey{tokenURL, clientID}] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -210,9 +213,9 @@ func cloneURLValues(v url.Values) url.Values { } func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { - needsAuthStyleProbe := authStyle == 0 + needsAuthStyleProbe := authStyle == AuthStyleUnknown if needsAuthStyleProbe { - if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL, clientID); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -242,7 +245,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - styleCache.setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, clientID, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. 
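
A sketch (not part of the patch): the new ExpiresIn field carries only the raw "expires_in" wire value; deriving an absolute Expiry remains the application's job. tokenWithExpiry is a hypothetical helper, not part of the library:

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/oauth2"
    )

    // tokenWithExpiry is a hypothetical helper: it derives an absolute
    // Expiry from the relative expires_in wire value when Expiry is unset.
    func tokenWithExpiry(t *oauth2.Token, now time.Time) *oauth2.Token {
        if t.Expiry.IsZero() && t.ExpiresIn > 0 {
            t2 := *t
            t2.Expiry = now.Add(time.Duration(t.ExpiresIn) * time.Second)
            return &t2
        }
        return t
    }

    func main() {
        t := &oauth2.Token{AccessToken: "x", ExpiresIn: 3600}
        fmt.Println(tokenWithExpiry(t, time.Now()).Expiry) // ~one hour from now
    }
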
@@ -257,7 +260,7 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, err } - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) r.Body.Close() if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) @@ -312,7 +315,8 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { TokenType: tj.TokenType, RefreshToken: tj.RefreshToken, Expiry: tj.expiry(), - Raw: make(map[string]interface{}), + ExpiresIn: int64(tj.ExpiresIn), + Raw: make(map[string]any), } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index b9db01ddfd..afc0aeb274 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -9,8 +9,8 @@ import ( "net/http" ) -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate an [*http.Client] value with a context. var HTTPClient ContextKey // ContextKey is just an empty struct. It exists so HTTPClient can be diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 09f6a49b80..de34feb844 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -22,9 +22,9 @@ import ( ) // NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). +// your own [context.Context]. // -// Deprecated: Use context.Background() or context.TODO() instead. +// Deprecated: Use [context.Background] or [context.TODO] instead. var NoContext = context.TODO() // RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. @@ -37,8 +37,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) {} // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. -// For the client credentials 2-legged OAuth2 flow, see the clientcredentials -// package (https://golang.org/x/oauth2/clientcredentials). +// For the client credentials 2-legged OAuth2 flow, see the +// [golang.org/x/oauth2/clientcredentials] package. type Config struct { // ClientID is the application's ID. ClientID string @@ -46,7 +46,7 @@ type Config struct { // ClientSecret is the application's secret. ClientSecret string - // Endpoint contains the resource server's token endpoint + // Endpoint contains the authorization server's token endpoint // URLs. These are constants specific to each server and are // often available via site-specific packages, such as // google.Endpoint or github.Endpoint. @@ -56,7 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. RedirectURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. 
Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is @@ -135,7 +135,7 @@ type setParam struct{ k, v string } func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// SetAuthURLParam builds an [AuthCodeOption] which passes key/value parameters // to a provider's authorization endpoint. func SetAuthURLParam(key, value string) AuthCodeOption { return setParam{key, value} @@ -148,8 +148,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // request and callback. The authorization server includes this value when // redirecting the user agent back to the client. // -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. +// Opts may include [AccessTypeOnline] or [AccessTypeOffline], as well +// as [ApprovalForce]. // // To protect against CSRF attacks, opts should include a PKCE challenge // (S256ChallengeOption). Not all servers support PKCE. An alternative is to @@ -194,7 +194,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { // and when other authorization grant types are not available." // See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { v := url.Values{ "grant_type": {"password"}, @@ -212,10 +212,10 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // It is used after a resource provider redirects the user back // to the Redirect URI (the URL obtained from AuthCodeURL). // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. // -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state") if you are +// The code will be in the [http.Request.FormValue]("code"). Before +// calling Exchange, be sure to validate [http.Request.FormValue]("state") if you are // using it to protect against CSRF attacks. // // If using PKCE to protect against CSRF attacks, opts should include a @@ -242,10 +242,10 @@ func (c *Config) Client(ctx context.Context, t *Token) *http.Client { return NewClient(ctx, c.TokenSource(ctx, t)) } -// TokenSource returns a TokenSource that returns t until t expires, +// TokenSource returns a [TokenSource] that returns t until t expires, // automatically refreshing it as necessary using the provided context. // -// Most users will use Config.Client instead. +// Most users will use [Config.Client] instead. func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { tkr := &tokenRefresher{ ctx: ctx, @@ -260,7 +260,7 @@ func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { } } -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// tokenRefresher is a TokenSource that makes "grant_type=refresh_token" // HTTP requests to renew a token using a RefreshToken. 
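
For illustration only (not part of the patch): the PKCE helpers referenced in the AuthCodeURL documentation above fit into the auth-code flow as follows. All endpoint URLs and the client ID are placeholders:

    package main

    import (
        "fmt"

        "golang.org/x/oauth2"
    )

    func main() {
        conf := &oauth2.Config{
            ClientID:    "client-id",
            RedirectURL: "https://app.example.com/callback",
            Scopes:      []string{"profile"},
            Endpoint: oauth2.Endpoint{
                AuthURL:  "https://auth.example.com/authorize",
                TokenURL: "https://auth.example.com/token",
            },
        }

        verifier := oauth2.GenerateVerifier()
        url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline,
            oauth2.S256ChallengeOption(verifier))
        fmt.Println(url)

        // After the user is redirected back with ?code=...&state=...:
        // tok, err := conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))
    }
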
type tokenRefresher struct { ctx context.Context // used to get HTTP requests @@ -288,7 +288,7 @@ func (tf *tokenRefresher) Token() (*Token, error) { if tf.refreshToken != tk.RefreshToken { tf.refreshToken = tk.RefreshToken } - return tk, err + return tk, nil } // reuseTokenSource is a TokenSource that holds a single token in memory @@ -305,8 +305,7 @@ type reuseTokenSource struct { } // Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. +// refresh the current token and return the new one. func (s *reuseTokenSource) Token() (*Token, error) { s.mu.Lock() defer s.mu.Unlock() @@ -322,7 +321,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { return t, nil } -// StaticTokenSource returns a TokenSource that always returns the same token. +// StaticTokenSource returns a [TokenSource] that always returns the same token. // Because the provided token t is never refreshed, StaticTokenSource is only // useful for tokens that never expire. func StaticTokenSource(t *Token) TokenSource { @@ -338,16 +337,16 @@ func (s staticTokenSource) Token() (*Token, error) { return s.t, nil } -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate a [*http.Client] value with a context. var HTTPClient internal.ContextKey -// NewClient creates an *http.Client from a Context and TokenSource. +// NewClient creates an [*http.Client] from a [context.Context] and [TokenSource]. // The returned client is not valid beyond the lifetime of the context. // -// Note that if a custom *http.Client is provided via the Context it +// Note that if a custom [*http.Client] is provided via the [context.Context] it // is used only for token acquisition and is not used to configure the -// *http.Client returned from NewClient. +// [*http.Client] returned from NewClient. // // As a special case, if src is nil, a non-OAuth2 client is returned // using the provided context. This exists to support related OAuth2 @@ -356,15 +355,19 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { return internal.ContextClient(ctx) } + cc := internal.ContextClient(ctx) return &http.Client{ Transport: &Transport{ - Base: internal.ContextClient(ctx).Transport, + Base: cc.Transport, Source: ReuseTokenSource(nil, src), }, + CheckRedirect: cc.CheckRedirect, + Jar: cc.Jar, + Timeout: cc.Timeout, } } -// ReuseTokenSource returns a TokenSource which repeatedly returns the +// ReuseTokenSource returns a [TokenSource] which repeatedly returns the // same token as long as it's valid, starting with t. // When its cached token is invalid, a new token is obtained from src. // @@ -372,10 +375,10 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { // (such as a file on disk) between runs of a program, rather than // obtaining new tokens unnecessarily. // -// The initial token t may be nil, in which case the TokenSource is +// The initial token t may be nil, in which case the [TokenSource] is // wrapped in a caching version if it isn't one already. This also // means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. +// [TokenSource] without adverse effects. func ReuseTokenSource(t *Token, src TokenSource) TokenSource { // Don't wrap a reuseTokenSource in itself. 
That would work, // but cause an unnecessary number of mutex operations. @@ -393,8 +396,8 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the -// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// ReuseTokenSourceWithExpiry returns a [TokenSource] that acts in the same manner as the +// [TokenSource] returned by [ReuseTokenSource], except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go index 50593b6dfe..cea8374d51 100644 --- a/vendor/golang.org/x/oauth2/pkce.go +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -1,6 +1,7 @@ // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + package oauth2 import ( @@ -20,9 +21,9 @@ const ( // This follows recommendations in RFC 7636. // // A fresh verifier should be generated for each authorization. -// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange -// (or Config.DeviceAccessToken). +// The resulting verifier should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] +// with [S256ChallengeOption], and to [Config.Exchange] or [Config.DeviceAccessToken] +// with [VerifierOption]. func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be // used to create a 32-octet sequence. The octet sequence is then @@ -36,22 +37,22 @@ func GenerateVerifier() string { return base64.RawURLEncoding.EncodeToString(data) } -// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be -// passed to Config.Exchange or Config.DeviceAccessToken only. +// VerifierOption returns a PKCE code verifier [AuthCodeOption]. It should only be +// passed to [Config.Exchange] or [Config.DeviceAccessToken]. func VerifierOption(verifier string) AuthCodeOption { return setParam{k: codeVerifierKey, v: verifier} } // S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. // -// Prefer to use S256ChallengeOption where possible. +// Prefer to use [S256ChallengeOption] where possible. func S256ChallengeFromVerifier(verifier string) string { sha := sha256.Sum256([]byte(verifier)) return base64.RawURLEncoding.EncodeToString(sha[:]) } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] // only. func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 109997d77c..239ec32962 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -44,7 +44,7 @@ type Token struct { // Expiry is the optional expiration time of the access token. // - // If zero, TokenSource implementations will reuse the same + // If zero, [TokenSource] implementations will reuse the same // token forever and RefreshToken or equivalent // mechanisms for that TokenSource will not be used. 
Expiry time.Time `json:"expiry,omitempty"` @@ -58,7 +58,7 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. - raw interface{} + raw any // expiryDelta is used to calculate when a token is considered // expired, by subtracting from Expiry. If zero, defaultExpiryDelta @@ -86,16 +86,16 @@ func (t *Token) Type() string { // SetAuthHeader sets the Authorization header to r using the access // token in t. // -// This method is unnecessary when using Transport or an HTTP Client +// This method is unnecessary when using [Transport] or an HTTP Client // returned by this package. func (t *Token) SetAuthHeader(r *http.Request) { r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) } -// WithExtra returns a new Token that's a clone of t, but using the +// WithExtra returns a new [Token] that's a clone of t, but using the // provided raw extra map. This is only intended for use by packages // implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { +func (t *Token) WithExtra(extra any) *Token { t2 := new(Token) *t2 = *t t2.raw = extra @@ -105,8 +105,8 @@ func (t *Token) WithExtra(extra interface{}) *Token { // Extra returns an extra field. // Extra fields are key-value pairs returned by the server as a // part of the token retrieval response. -func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { +func (t *Token) Extra(key string) any { + if raw, ok := t.raw.(map[string]any); ok { return raw[key] } @@ -163,13 +163,14 @@ func tokenFromInternal(t *internal.Token) *Token { TokenType: t.TokenType, RefreshToken: t.RefreshToken, Expiry: t.Expiry, + ExpiresIn: t.ExpiresIn, raw: t.Raw, } } // retrieveToken takes a *Config and uses that to retrieve an *internal.Token. // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. +// with an error. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index 90657915fb..8bbebbac9e 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -11,12 +11,12 @@ import ( "sync" ) -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. +// Transport is an [http.RoundTripper] that makes OAuth 2.0 HTTP requests, +// wrapping a base [http.RoundTripper] and adding an Authorization header +// with a token from the supplied [TokenSource]. // // Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. +// higher-level [Config.Client] method instead. type Transport struct { // Source supplies the token to add to outgoing requests' // Authorization headers. @@ -47,7 +47,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { return nil, err } - req2 := cloneRequest(req) // per RoundTripper contract + req2 := req.Clone(req.Context()) token.SetAuthHeader(req2) // req.Body is assumed to be closed by the base RoundTripper. 
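The transport.go hunk above replaces the hand-rolled cloneRequest with req.Clone, preserving the RoundTripper contract that RoundTrip must not mutate the caller's request. A hedged sketch of wiring Transport directly with a TokenSource follows (most code would use Config.Client instead, as the doc comment notes); the token value and URL are placeholders.

```go
package main

import (
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	// Placeholder token; StaticTokenSource never refreshes it.
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "token"})

	client := &http.Client{
		Transport: &oauth2.Transport{
			// ReuseTokenSource caches the token and only hits src when it expires.
			Source: oauth2.ReuseTokenSource(nil, src),
			Base:   http.DefaultTransport,
		},
	}

	req, err := http.NewRequest("GET", "https://api.example.com/resource", nil) // placeholder URL
	if err != nil {
		return
	}
	// RoundTrip clones req before setting the Authorization header,
	// so req itself is never mutated.
	resp, err := client.Do(req)
	if err == nil {
		resp.Body.Close()
	}
}
```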
@@ -73,17 +73,3 @@ func (t *Transport) base() http.RoundTripper { } return http.DefaultTransport } - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index cb6bb9ad3b..1d8cffae8c 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -12,8 +12,6 @@ package errgroup import ( "context" "fmt" - "runtime" - "runtime/debug" "sync" ) @@ -33,10 +31,6 @@ type Group struct { errOnce sync.Once err error - - mu sync.Mutex - panicValue any // = PanicError | PanicValue; non-nil if some Group.Go goroutine panicked. - abnormal bool // some Group.Go goroutine terminated abnormally (panic or goexit). } func (g *Group) done() { @@ -56,22 +50,13 @@ func WithContext(ctx context.Context) (*Group, context.Context) { return &Group{cancel: cancel}, ctx } -// Wait blocks until all function calls from the Go method have returned -// normally, then returns the first non-nil error (if any) from them. -// -// If any of the calls panics, Wait panics with a [PanicValue]; -// and if any of them calls [runtime.Goexit], Wait calls runtime.Goexit. +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. func (g *Group) Wait() error { g.wg.Wait() if g.cancel != nil { g.cancel(g.err) } - if g.panicValue != nil { - panic(g.panicValue) - } - if g.abnormal { - runtime.Goexit() - } return g.err } @@ -81,53 +66,31 @@ func (g *Group) Wait() error { // It blocks until the new goroutine can be added without the number of // goroutines in the group exceeding the configured limit. // -// The first goroutine in the group that returns a non-nil error, panics, or -// invokes [runtime.Goexit] will cancel the associated Context, if any. +// The first goroutine in the group that returns a non-nil error will +// cancel the associated Context, if any. The error will be returned +// by Wait. func (g *Group) Go(f func() error) { if g.sem != nil { g.sem <- token{} } - g.add(f) -} - -func (g *Group) add(f func() error) { g.wg.Add(1) go func() { defer g.done() - normalReturn := false - defer func() { - if normalReturn { - return - } - v := recover() - g.mu.Lock() - defer g.mu.Unlock() - if !g.abnormal { - if g.cancel != nil { - g.cancel(g.err) - } - g.abnormal = true - } - if v != nil && g.panicValue == nil { - switch v := v.(type) { - case error: - g.panicValue = PanicError{ - Recovered: v, - Stack: debug.Stack(), - } - default: - g.panicValue = PanicValue{ - Recovered: v, - Stack: debug.Stack(), - } - } - } - }() - err := f() - normalReturn = true - if err != nil { + // It is tempting to propagate panics from f() + // up to the goroutine that calls Wait, but + // it creates more problems than it solves: + // - it delays panics arbitrarily, + // making bugs harder to detect; + // - it turns f's panic stack into a mere value, + // hiding it from crash-monitoring tools; + // - it risks deadlocks that hide the panic entirely, + // if f's panic leaves the program in a state + // that prevents the Wait call from being reached. 
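The comment block above (continued with issue references just below) explains why this errgroup revision reverts panic propagation: a panic in f now crashes the program at the panic site rather than being replayed later in Wait. A short sketch of the resulting, plain semantics, under no assumptions beyond the public errgroup API:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())

	for i := 0; i < 3; i++ {
		i := i
		g.Go(func() error {
			select {
			case <-ctx.Done():
				// Another goroutine already failed and cancelled the context.
				return ctx.Err()
			default:
			}
			if i == 2 {
				return fmt.Errorf("task %d failed", i)
			}
			return nil
		})
	}

	// Wait returns the first non-nil error; under these semantics it
	// never re-panics and never calls runtime.Goexit.
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}
```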
+ // See #53757, #74275, #74304, #74306. + + if err := f(); err != nil { g.errOnce.Do(func() { g.err = err if g.cancel != nil { @@ -152,7 +115,19 @@ func (g *Group) TryGo(f func() error) bool { } } - g.add(f) + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() return true } @@ -174,34 +149,3 @@ func (g *Group) SetLimit(n int) { } g.sem = make(chan token, n) } - -// PanicError wraps an error recovered from an unhandled panic -// when calling a function passed to Go or TryGo. -type PanicError struct { - Recovered error - Stack []byte // result of call to [debug.Stack] -} - -func (p PanicError) Error() string { - if len(p.Stack) > 0 { - return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack) - } - return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered) -} - -func (p PanicError) Unwrap() error { return p.Recovered } - -// PanicValue wraps a value that does not implement the error interface, -// recovered from an unhandled panic when calling a function passed to Go or -// TryGo. -type PanicValue struct { - Recovered any - Stack []byte // result of call to [debug.Stack] -} - -func (p PanicValue) String() string { - if len(p.Stack) > 0 { - return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack) - } - return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered) -} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 6ab02b6c31..d1c8b2640e 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -349,6 +349,9 @@ struct ltchars { #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) +// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info") +#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME +#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION ' includes_NetBSD=' diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 798f61ad3b..7838ca5db2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } -// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) const minIovec = 8 func Readv(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) @@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) { } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = preadv(fd, iovecs, offset) @@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs 
[][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } } -func darwinMajorMinPatch() (maj, min, patch int, err error) { - var un Utsname - err = Uname(&un) - if err != nil { - return - } - - var mmp [3]int - c := 0 -Loop: - for _, b := range un.Release[:] { - switch { - case b >= '0' && b <= '9': - mmp[c] = 10*mmp[c] + int(b-'0') - case b == '.': - c++ - if c > 2 { - return 0, 0, 0, ENOTSUP - } - case b == 0: - break Loop - default: - return 0, 0, 0, ENOTSUP - } - } - if c != 2 { - return 0, 0, 0, ENOTSUP - } - return mmp[0], mmp[1], mmp[2], nil -} - -func darwinKernelVersionMin(maj, min, patch int) bool { - actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch() - if err != nil { - return false - } - return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch) -} - +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 4f432bfe8f..b6db27d937 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -319,6 +319,7 @@ const ( AUDIT_INTEGRITY_POLICY_RULE = 0x70f AUDIT_INTEGRITY_RULE = 0x70d AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_INTEGRITY_USERSPACE = 0x710 AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f AUDIT_IPE_ACCESS = 0x58c @@ -327,6 +328,8 @@ const ( AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -491,6 +494,7 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 @@ -527,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -554,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -843,9 +849,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2023-03-01)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x30 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -936,11 +942,10 @@ const ( EPOLL_CTL_MOD = 0x3 EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 + ETHTOOL_FAMILY_NAME = "ethtool" + ETHTOOL_FAMILY_VERSION = 0x1 ETHTOOL_FEC_AUTO = 0x2 ETHTOOL_FEC_BASER = 0x10 ETHTOOL_FEC_LLRS = 0x20 @@ -1203,13 +1208,18 @@ const ( FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 FAN_EPIDFD = -0x2 + FAN_ERRNO_BITS = 0x8 + FAN_ERRNO_MASK = 0xff + FAN_ERRNO_SHIFT = 0x18 FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 
0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 + FAN_EVENT_INFO_TYPE_RANGE = 0x6 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 @@ -1224,9 +1234,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1240,6 +1253,7 @@ const ( FAN_OPEN_EXEC = 0x1000 FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 + FAN_PRE_ACCESS = 0x100000 FAN_Q_OVERFLOW = 0x4000 FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 @@ -1247,6 +1261,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1266,6 +1281,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1574,7 +1590,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 @@ -1625,7 +1640,6 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -1687,7 +1701,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1809,7 +1822,11 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 @@ -2485,6 +2502,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 @@ -2644,6 +2665,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2724,6 +2749,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2787,7 +2813,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e + RTA_MAX = 0x1f RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -2864,10 +2890,12 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELANYCAST = 0x3d RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELLINKPROP = 0x6d RTM_DELMDB = 
0x55 + RTM_DELMULTICAST = 0x39 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 RTM_DELNEXTHOP = 0x69 @@ -2917,11 +2945,13 @@ const ( RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWANYCAST = 0x3c RTM_NEWCACHEREPORT = 0x60 RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWLINKPROP = 0x6c RTM_NEWMDB = 0x54 + RTM_NEWMULTICAST = 0x38 RTM_NEWNDUSEROPT = 0x44 RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 @@ -2970,6 +3000,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -2987,11 +3018,12 @@ const ( RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 RWF_ATOMIC = 0x40 + RWF_DONTCACHE = 0x80 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x7f + RWF_SUPPORTED = 0xff RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3271,6 +3303,7 @@ const ( STATX_BTIME = 0x800 STATX_CTIME = 0x80 STATX_DIOALIGN = 0x2000 + STATX_DIO_READ_ALIGN = 0x20000 STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 @@ -3322,7 +3355,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xe + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3392,8 +3425,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3503,6 +3534,7 @@ const ( TP_STATUS_WRONG_FORMAT = 0x4 TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 + UBI_IOCECNFO = 0xc01c6f06 UDF_SUPER_MAGIC = 0x15013346 UDP_CORK = 0x1 UDP_ENCAP = 0x64 @@ -3515,8 +3547,6 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff @@ -3559,7 +3589,7 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3673,6 +3703,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 75207613c7..1c37f9fbc4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -360,6 +361,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -372,6 +374,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c68acda535..6f54d34aef 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -361,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 
SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -373,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a8c607ab86..783ec5c126 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -366,6 +367,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -378,6 +380,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 18563dd8d3..ca83d3ba16 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 22912cdaa9..607e611c0c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -353,6 +354,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -365,6 +367,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 29344eb37a..b9cb5bd3c0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 20d51fb96a..65b078a638 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 321b60902a..5298a3033d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 9bacdf1e27..7bc557c876 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c224272615..152399bb04 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -414,6 +415,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -426,6 +428,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 6270c8ee13..1a1ce2409c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 
0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -430,6 +432,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 9966c1941f..4231a1fb57 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -430,6 +432,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 848e5fcc42..21c0e95266 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -350,6 +351,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -362,6 +364,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 669b2adb80..f00d1cd7cf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -422,6 +423,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -434,6 +436,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 4834e57514..bc8d539e6a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -461,6 +462,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS = 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 @@ -473,6 +475,7 @@ const ( SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 SO_RCVMARK = 0x54 + SO_RCVPRIORITY = 0x5b SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff306..aca56ee494 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -462,4 +462,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 5eb450695e..2ea1ef58c3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -385,4 +385,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e5029744..d22c8af319 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -426,4 +426,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec51b..5ee264ae97 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -329,4 +329,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a18..f9f03ebf5f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -325,4 +325,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a336b..87c2118e84 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b9962278..391ad102fb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9e6..5656157757 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc22..0482b52e3c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 
0d777bfbb1..71806f08f3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -453,4 +453,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b446365025..e35a710582 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c188..2aea476705 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 8405391698..6c9bb4e560 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -330,4 +330,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790d6..680bc9915a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -391,4 +391,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f9d..620f271052 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -404,4 +404,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index a46abe6472..cd236443f6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -114,8 +114,10 @@ type Statx_t struct { Atomic_write_unit_min uint32 Atomic_write_unit_max uint32 Atomic_write_segments_max uint32 + Dio_read_offset_align uint32 + Atomic_write_unit_max_opt uint32 _ [1]uint32 - _ [9]uint64 + _ [8]uint64 } type Fsid struct { @@ -199,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 + _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -2226,8 +2229,11 @@ const ( NFT_PAYLOAD_LL_HEADER = 0x0 NFT_PAYLOAD_NETWORK_HEADER = 0x1 NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_INNER_HEADER = 0x3 + NFT_PAYLOAD_TUN_HEADER = 0x4 NFT_PAYLOAD_CSUM_NONE = 0x0 NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_CSUM_SCTP = 0x2 NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 NFTA_PAYLOAD_UNSPEC = 0x0 NFTA_PAYLOAD_DREG = 0x1 @@ -2314,6 +2320,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 
NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2594,8 +2605,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x20000 - SOF_TIMESTAMPING_MASK = 0x3ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3802,7 +3813,16 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2d + ETHTOOL_MSG_PLCA_GET_CFG = 0x27 + ETHTOOL_MSG_PLCA_SET_CFG = 0x28 + ETHTOOL_MSG_PLCA_GET_STATUS = 0x29 + ETHTOOL_MSG_MM_GET = 0x2a + ETHTOOL_MSG_MM_SET = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 0x2c + ETHTOOL_MSG_PHY_GET = 0x2d + ETHTOOL_MSG_TSCONFIG_GET = 0x2e + ETHTOOL_MSG_TSCONFIG_SET = 0x2f + ETHTOOL_MSG_USER_MAX = 0x2f ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3842,7 +3862,17 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2e + ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 0x27 + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 0x28 + ETHTOOL_MSG_PLCA_NTF = 0x29 + ETHTOOL_MSG_MM_GET_REPLY = 0x2a + ETHTOOL_MSG_MM_NTF = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 0x2c + ETHTOOL_MSG_PHY_GET_REPLY = 0x2d + ETHTOOL_MSG_PHY_NTF = 0x2e + ETHTOOL_MSG_TSCONFIG_GET_REPLY = 0x2f + ETHTOOL_MSG_TSCONFIG_SET_REPLY = 0x30 + ETHTOOL_MSG_KERNEL_MAX = 0x30 ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3949,7 +3979,12 @@ const ( ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb ETHTOOL_A_RINGS_CQE_SIZE = 0xc ETHTOOL_A_RINGS_TX_PUSH = 0xd - ETHTOOL_A_RINGS_MAX = 0x10 + ETHTOOL_A_RINGS_RX_PUSH = 0xe + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 0xf + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 0x10 + ETHTOOL_A_RINGS_HDS_THRESH = 0x11 + ETHTOOL_A_RINGS_HDS_THRESH_MAX = 0x12 + ETHTOOL_A_RINGS_MAX = 0x12 ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -4015,7 +4050,9 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x6 + ETHTOOL_A_TSINFO_STATS = 0x6 + ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 + ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4101,6 +4138,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type EthtoolDrvinfo struct { @@ -4613,6 +4663,7 @@ const ( NL80211_ATTR_AKM_SUITES = 0x4c NL80211_ATTR_AP_ISOLATE = 0x60 NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135 + NL80211_ATTR_ASSOC_SPP_AMSDU = 0x14a NL80211_ATTR_AUTH_DATA = 0x9c NL80211_ATTR_AUTH_TYPE = 0x35 NL80211_ATTR_BANDS = 0xef @@ -4623,6 +4674,7 @@ const ( NL80211_ATTR_BSS_BASIC_RATES = 0x24 NL80211_ATTR_BSS = 0x2f NL80211_ATTR_BSS_CTS_PROT = 0x1c + NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA = 0x147 NL80211_ATTR_BSS_HT_OPMODE = 0x6d NL80211_ATTR_BSSID = 0xf5 NL80211_ATTR_BSS_SELECT = 0xe3 @@ -4682,6 +4734,7 @@ const ( NL80211_ATTR_DTIM_PERIOD = 0xd NL80211_ATTR_DURATION = 0x57 NL80211_ATTR_EHT_CAPABILITY = 0x136 + NL80211_ATTR_EMA_RNR_ELEMS = 0x145 NL80211_ATTR_EML_CAPABILITY = 0x13d NL80211_ATTR_EXT_CAPA = 0xa9 NL80211_ATTR_EXT_CAPA_MASK = 0xaa @@ -4717,6 +4770,7 @@ const ( NL80211_ATTR_HIDDEN_SSID = 0x7e 
NL80211_ATTR_HT_CAPABILITY = 0x1f NL80211_ATTR_HT_CAPABILITY_MASK = 0x94 + NL80211_ATTR_HW_TIMESTAMP_ENABLED = 0x144 NL80211_ATTR_IE_ASSOC_RESP = 0x80 NL80211_ATTR_IE = 0x2a NL80211_ATTR_IE_PROBE_RESP = 0x7f @@ -4747,9 +4801,10 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14d + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce + NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 NL80211_ATTR_MAX_MATCH_SETS = 0x85 NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 @@ -4774,9 +4829,12 @@ const ( NL80211_ATTR_MGMT_SUBTYPE = 0x29 NL80211_ATTR_MLD_ADDR = 0x13a NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e + NL80211_ATTR_MLO_LINK_DISABLED = 0x146 NL80211_ATTR_MLO_LINK_ID = 0x139 NL80211_ATTR_MLO_LINKS = 0x138 NL80211_ATTR_MLO_SUPPORT = 0x13b + NL80211_ATTR_MLO_TTLM_DLINK = 0x148 + NL80211_ATTR_MLO_TTLM_ULINK = 0x149 NL80211_ATTR_MNTR_FLAGS = 0x17 NL80211_ATTR_MPATH_INFO = 0x1b NL80211_ATTR_MPATH_NEXT_HOP = 0x1a @@ -4809,12 +4867,14 @@ const ( NL80211_ATTR_PORT_AUTHORIZED = 0x103 NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5 NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6 + NL80211_ATTR_POWER_RULE_PSD = 0x8 NL80211_ATTR_PREV_BSSID = 0x4f NL80211_ATTR_PRIVACY = 0x46 NL80211_ATTR_PROBE_RESP = 0x91 NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90 NL80211_ATTR_PROTOCOL_FEATURES = 0xad NL80211_ATTR_PS_STATE = 0x5d + NL80211_ATTR_PUNCT_BITMAP = 0x142 NL80211_ATTR_QOS_MAP = 0xc7 NL80211_ATTR_RADAR_BACKGROUND = 0x134 NL80211_ATTR_RADAR_EVENT = 0xa8 @@ -4943,7 +5003,9 @@ const ( NL80211_ATTR_WIPHY_FREQ = 0x26 NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9 NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122 + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS = 0x14c NL80211_ATTR_WIPHY_NAME = 0x2 + NL80211_ATTR_WIPHY_RADIOS = 0x14b NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40 @@ -4978,6 +5040,8 @@ const ( NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 NL80211_BAND_ATTR_MAX = 0xd NL80211_BAND_ATTR_RATES = 0x2 + NL80211_BAND_ATTR_S1G_CAPA = 0xd + NL80211_BAND_ATTR_S1G_MCS_NSS_SET = 0xc NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8 @@ -5001,6 +5065,10 @@ const ( NL80211_BSS_BEACON_INTERVAL = 0x4 NL80211_BSS_BEACON_TSF = 0xd NL80211_BSS_BSSID = 0x1 + NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 0x2 + NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 0x1 + NL80211_BSS_CANNOT_USE_REASONS = 0x18 + NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 0x2 NL80211_BSS_CAPABILITY = 0x5 NL80211_BSS_CHAIN_SIGNAL = 0x13 NL80211_BSS_CHAN_WIDTH_10 = 0x1 @@ -5032,6 +5100,9 @@ const ( NL80211_BSS_STATUS = 0x9 NL80211_BSS_STATUS_IBSS_JOINED = 0x2 NL80211_BSS_TSF = 0x3 + NL80211_BSS_USE_FOR = 0x17 + NL80211_BSS_USE_FOR_MLD_LINK = 0x2 + NL80211_BSS_USE_FOR_NORMAL = 0x1 NL80211_CHAN_HT20 = 0x1 NL80211_CHAN_HT40MINUS = 0x2 NL80211_CHAN_HT40PLUS = 0x3 @@ -5117,7 +5188,8 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9b + NL80211_CMD_LINKS_REMOVED = 0x9a + NL80211_CMD_MAX = 0x9d NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5161,6 +5233,7 @@ const ( NL80211_CMD_SET_COALESCE = 0x65 NL80211_CMD_SET_CQM = 0x3f NL80211_CMD_SET_FILS_AAD = 0x92 + NL80211_CMD_SET_HW_TIMESTAMP = 0x99 NL80211_CMD_SET_INTERFACE = 0x6 NL80211_CMD_SET_KEY = 0xa NL80211_CMD_SET_MAC_ACL = 0x5d @@ -5180,6 +5253,7 @@ const ( 
NL80211_CMD_SET_SAR_SPECS = 0x8c NL80211_CMD_SET_STATION = 0x12 NL80211_CMD_SET_TID_CONFIG = 0x89 + NL80211_CMD_SET_TID_TO_LINK_MAPPING = 0x9b NL80211_CMD_SET_TX_BITRATE_MASK = 0x39 NL80211_CMD_SET_WDS_PEER = 0x42 NL80211_CMD_SET_WIPHY = 0x2 @@ -5247,6 +5321,7 @@ const ( NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21 NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22 NL80211_EXT_FEATURE_AQL = 0x28 + NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA = 0x40 NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29 NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36 @@ -5262,6 +5337,7 @@ const ( NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c + NL80211_EXT_FEATURE_DFS_CONCURRENT = 0x43 NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 @@ -5281,9 +5357,12 @@ const ( NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_OWE_OFFLOAD_AP = 0x42 + NL80211_EXT_FEATURE_OWE_OFFLOAD = 0x41 NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_PUNCT = 0x3e NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c NL80211_EXT_FEATURE_RRM = 0x1 NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 @@ -5295,8 +5374,10 @@ const ( NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23 NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc NL80211_EXT_FEATURE_SECURE_LTF = 0x37 + NL80211_EXT_FEATURE_SECURE_NAN = 0x3f NL80211_EXT_FEATURE_SECURE_RTT = 0x38 NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5 + NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT = 0x44 NL80211_EXT_FEATURE_STA_TX_PWR = 0x25 NL80211_EXT_FEATURE_TXQS = 0x1c NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35 @@ -5343,7 +5424,10 @@ const ( NL80211_FREQUENCY_ATTR_2MHZ = 0x16 NL80211_FREQUENCY_ATTR_4MHZ = 0x17 NL80211_FREQUENCY_ATTR_8MHZ = 0x18 + NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP = 0x21 + NL80211_FREQUENCY_ATTR_CAN_MONITOR = 0x20 NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd + NL80211_FREQUENCY_ATTR_DFS_CONCURRENT = 0x1d NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7 NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8 NL80211_FREQUENCY_ATTR_DISABLED = 0x2 @@ -5351,12 +5435,14 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x21 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a + NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b NL80211_FREQUENCY_ATTR_NO_HE = 0x13 @@ -5364,8 +5450,11 @@ const ( NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3 NL80211_FREQUENCY_ATTR_NO_IR = 0x3 + NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_OFFSET = 0x14 NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3 + NL80211_FREQUENCY_ATTR_PSD = 0x1c NL80211_FREQUENCY_ATTR_RADAR = 0x5 NL80211_FREQUENCY_ATTR_WMM = 0x12 NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3 @@ -5430,6 +5519,7 @@ const ( 
NL80211_IFTYPE_STATION = 0x2 NL80211_IFTYPE_UNSPECIFIED = 0x0 NL80211_IFTYPE_WDS = 0x5 + NL80211_KCK_EXT_LEN_32 = 0x20 NL80211_KCK_EXT_LEN = 0x18 NL80211_KCK_LEN = 0x10 NL80211_KEK_EXT_LEN = 0x20 @@ -5458,9 +5548,10 @@ const ( NL80211_MAX_SUPP_HT_RATES = 0x4d NL80211_MAX_SUPP_RATES = 0x20 NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 @@ -5703,11 +5794,16 @@ const ( NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_16_MHZ_WIDTH = 0x1d + NL80211_RATE_INFO_1_MHZ_WIDTH = 0x19 + NL80211_RATE_INFO_2_MHZ_WIDTH = 0x1a NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12 NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 + NL80211_RATE_INFO_4_MHZ_WIDTH = 0x1b NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 + NL80211_RATE_INFO_8_MHZ_WIDTH = 0x1c NL80211_RATE_INFO_BITRATE32 = 0x5 NL80211_RATE_INFO_BITRATE = 0x1 NL80211_RATE_INFO_EHT_GI_0_8 = 0x0 @@ -5753,6 +5849,8 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 + NL80211_RATE_INFO_S1G_MCS = 0x17 + NL80211_RATE_INFO_S1G_NSS = 0x18 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 NL80211_RATE_INFO_VHT_NSS = 0x7 @@ -5770,14 +5868,19 @@ const ( NL80211_REKEY_DATA_KEK = 0x1 NL80211_REKEY_DATA_REPLAY_CTR = 0x3 NL80211_REPLAY_CTR_LEN = 0x8 + NL80211_RRF_ALLOW_6GHZ_VLP_AP = 0x1000000 NL80211_RRF_AUTO_BW = 0x800 NL80211_RRF_DFS = 0x10 + NL80211_RRF_DFS_CONCURRENT = 0x200000 NL80211_RRF_GO_CONCURRENT = 0x1000 NL80211_RRF_IR_CONCURRENT = 0x1000 NL80211_RRF_NO_160MHZ = 0x10000 NL80211_RRF_NO_320MHZ = 0x40000 + NL80211_RRF_NO_6GHZ_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_6GHZ_VLP_CLIENT = 0x400000 NL80211_RRF_NO_80MHZ = 0x8000 NL80211_RRF_NO_CCK = 0x2 + NL80211_RRF_NO_EHT = 0x80000 NL80211_RRF_NO_HE = 0x20000 NL80211_RRF_NO_HT40 = 0x6000 NL80211_RRF_NO_HT40MINUS = 0x2000 @@ -5788,7 +5891,10 @@ const ( NL80211_RRF_NO_IR = 0x80 NL80211_RRF_NO_OFDM = 0x1 NL80211_RRF_NO_OUTDOOR = 0x8 + NL80211_RRF_NO_UHB_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_UHB_VLP_CLIENT = 0x400000 NL80211_RRF_PASSIVE_SCAN = 0x80 + NL80211_RRF_PSD = 0x100000 NL80211_RRF_PTMP_ONLY = 0x40 NL80211_RRF_PTP_ONLY = 0x20 NL80211_RXMGMT_FLAG_ANSWERED = 0x1 @@ -5849,6 +5955,7 @@ const ( NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 + NL80211_STA_FLAG_SPP_AMSDU = 0x8 NL80211_STA_FLAG_TDLS_PEER = 0x6 NL80211_STA_FLAG_WME = 0x3 NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23 @@ -6007,6 +6114,13 @@ const ( NL80211_VHT_CAPABILITY_LEN = 0xc NL80211_VHT_NSS_MAX = 0x8 NL80211_WIPHY_NAME_MAXLEN = 0x40 + NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE = 0x2 + NL80211_WIPHY_RADIO_ATTR_INDEX = 0x1 + NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION = 0x3 + NL80211_WIPHY_RADIO_ATTR_MAX = 0x4 + NL80211_WIPHY_RADIO_FREQ_ATTR_END = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_START = 0x1 NL80211_WMMR_AIFSN = 0x3 NL80211_WMMR_CW_MAX = 0x2 NL80211_WMMR_CW_MIN = 0x1 @@ -6038,6 +6152,7 @@ const ( NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4 NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9 NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe + 
NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC = 0x14 NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index fd402da43f..485f2d3a1b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -282,7 +282,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -338,6 +338,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index eb7a5e1864..ecbd1ad8bc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -351,6 +351,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index d78ac108b6..02f0463a44 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -91,7 +91,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -273,7 +273,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -329,6 +329,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index cd06d47f1f..6f4d400d24 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -330,6 +330,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + 
Blkio_delay_min     uint64
+	Swapin_delay_max    uint64
+	Swapin_delay_min    uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max   uint64
+	Compact_delay_min   uint64
+	Wpcopy_delay_max    uint64
+	Wpcopy_delay_min    uint64
+	Irq_delay_max       uint64
+	Irq_delay_min       uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
index 2f28fe26c1..cd532cfa55 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -331,6 +331,22 @@ type Taskstats struct {
 	Wpcopy_delay_total  uint64
 	Irq_count           uint64
 	Irq_delay_total     uint64
+	Cpu_delay_max       uint64
+	Cpu_delay_min       uint64
+	Blkio_delay_max     uint64
+	Blkio_delay_min     uint64
+	Swapin_delay_max    uint64
+	Swapin_delay_min    uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max   uint64
+	Compact_delay_min   uint64
+	Wpcopy_delay_max    uint64
+	Wpcopy_delay_min    uint64
+	Irq_delay_max       uint64
+	Irq_delay_min       uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 71d6cac2f1..4133620851 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -278,7 +278,7 @@ type Taskstats struct {
 	Ac_exitcode     uint32
 	Ac_flag         uint8
 	Ac_nice         uint8
-	_               [4]byte
+	_               [6]byte
 	Cpu_count       uint64
 	Cpu_delay_total uint64
 	Blkio_count     uint64
@@ -334,6 +334,22 @@ type Taskstats struct {
 	Wpcopy_delay_total  uint64
 	Irq_count           uint64
 	Irq_delay_total     uint64
+	Cpu_delay_max       uint64
+	Cpu_delay_min       uint64
+	Blkio_delay_max     uint64
+	Blkio_delay_min     uint64
+	Swapin_delay_max    uint64
+	Swapin_delay_min    uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max   uint64
+	Compact_delay_min   uint64
+	Wpcopy_delay_max    uint64
+	Wpcopy_delay_min    uint64
+	Irq_delay_max       uint64
+	Irq_delay_min       uint64
 }
 
 type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 8596d45356..eaa37eb718 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -333,6 +333,22 @@ type Taskstats struct {
 	Wpcopy_delay_total  uint64
 	Irq_count           uint64
 	Irq_delay_total     uint64
+	Cpu_delay_max       uint64
+	Cpu_delay_min       uint64
+	Blkio_delay_max     uint64
+	Blkio_delay_min     uint64
+	Swapin_delay_max    uint64
+	Swapin_delay_min    uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max   uint64
+	Compact_delay_min   uint64
+	Wpcopy_delay_max    uint64
+	Wpcopy_delay_min    uint64
+	Irq_delay_max       uint64
+	Irq_delay_min       uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index cd60ea1866..98ae6a1e4a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -333,6 +333,22 @@ type Taskstats struct {
 	Wpcopy_delay_total  uint64
 	Irq_count           uint64
 	Irq_delay_total     uint64
+	Cpu_delay_max       uint64
+	Cpu_delay_min       uint64
+	Blkio_delay_max     uint64
+	Blkio_delay_min     uint64
+	Swapin_delay_max    uint64
+	Swapin_delay_min    uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max   uint64
+	Compact_delay_min   uint64
+	Wpcopy_delay_max    uint64
+	Wpcopy_delay_min    uint64
+	Irq_delay_max       uint64
+	Irq_delay_min       uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index b0ae420c48..cae1961594 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -278,7 +278,7 @@ type Taskstats struct {
 	Ac_exitcode     uint32
 	Ac_flag         uint8
 	Ac_nice         uint8
-	_               [4]byte
+	_               [6]byte
 	Cpu_count       uint64
 	Cpu_delay_total uint64
 	Blkio_count     uint64
@@ -334,6 +334,22 @@ type Taskstats struct {
 	Wpcopy_delay_total  uint64
 	Irq_count           uint64
 	Irq_delay_total     uint64
+	Cpu_delay_max       uint64
+	Cpu_delay_min       uint64
+	Blkio_delay_max     uint64
+	Blkio_delay_min     uint64
+	Swapin_delay_max    uint64
+	Swapin_delay_min    uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max   uint64
+	Compact_delay_min   uint64
+	Wpcopy_delay_max    uint64
+	Wpcopy_delay_min    uint64
+	Irq_delay_max       uint64
+	Irq_delay_min       uint64
 }
 
 type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
index 8359728759..6ce3b4e028 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
@@ -90,7 +90,7 @@ type Stat_t struct {
 	Gid     uint32
 	Rdev    uint64
 	_       uint16
-	_       [4]byte
+	_       [6]byte
 	Size    int64
 	Blksize int32
 	_       [4]byte
@@ -285,7 +285,7 @@ type Taskstats struct {
 	Ac_exitcode     uint32
 	Ac_flag         uint8
 	Ac_nice         uint8
-	_               [4]byte
+	_               [6]byte
 	Cpu_count       uint64
 	Cpu_delay_total uint64
 	Blkio_count     uint64
@@ -341,6 +341,22 @@ type Taskstats struct {
 	Wpcopy_delay_total  uint64
 	Irq_count           uint64
 	Irq_delay_total     uint64
+	Cpu_delay_max       uint64
+	Cpu_delay_min       uint64
+	Blkio_delay_max     uint64
+	Blkio_delay_min     uint64
+	Swapin_delay_max    uint64
+	Swapin_delay_min    uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max   uint64
+	Compact_delay_min   uint64
+	Wpcopy_delay_max    uint64
+	Wpcopy_delay_min    uint64
+	Irq_delay_max       uint64
+	Irq_delay_min       uint64
 }
 
 type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 69eb6a5c68..c7429c6a14 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -340,6 +340,22 @@ type Taskstats struct {
 	Wpcopy_delay_total  uint64
 	Irq_count           uint64
 	Irq_delay_total     uint64
+	Cpu_delay_max       uint64
+	Cpu_delay_min       uint64
+	Blkio_delay_max     uint64
+	Blkio_delay_min     uint64
+	Swapin_delay_max    uint64
+	Swapin_delay_min    uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max   uint64
+	Compact_delay_min   uint64
+	Wpcopy_delay_max    uint64
+	Wpcopy_delay_min    uint64
+	Irq_delay_max       uint64
+	Irq_delay_min       uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 5f583cb62b..4bf4baf4ca 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -340,6 +340,22 @@ type Taskstats struct {
 	Wpcopy_delay_total  uint64
 	Irq_count           uint64
 	Irq_delay_total     uint64
+	Cpu_delay_max       uint64
+	Cpu_delay_min       uint64
+	Blkio_delay_max     uint64
+	Blkio_delay_min     uint64
+	Swapin_delay_max    uint64
+	Swapin_delay_min    uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max   uint64
+	Compact_delay_min   uint64
+	Wpcopy_delay_max    uint64
+	Wpcopy_delay_min    uint64
+	Irq_delay_max       uint64
+	Irq_delay_min       uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index ad05b51a60..e9709d70af 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -358,6 +358,22 @@ type Taskstats struct {
 	Wpcopy_delay_total  uint64
 	Irq_count           uint64
 	Irq_delay_total     uint64
+	Cpu_delay_max       uint64
+	Cpu_delay_min       uint64
+	Blkio_delay_max     uint64
+	Blkio_delay_min     uint64
+	Swapin_delay_max    uint64
+	Swapin_delay_min    uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max   uint64
+	Compact_delay_min   uint64
+	Wpcopy_delay_max    uint64
+	Wpcopy_delay_min    uint64
+	Irq_delay_max       uint64
+	Irq_delay_min       uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index cf3ce90037..fb44268ca7 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -353,6 +353,22 @@ type Taskstats struct {
 	Wpcopy_delay_total  uint64
 	Irq_count           uint64
 	Irq_delay_total     uint64
+	Cpu_delay_max       uint64
+	Cpu_delay_min       uint64
+	Blkio_delay_max     uint64
+	Blkio_delay_min     uint64
+	Swapin_delay_max    uint64
+	Swapin_delay_min    uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max   uint64
+	Compact_delay_min   uint64
+	Wpcopy_delay_max    uint64
+	Wpcopy_delay_min    uint64
+	Irq_delay_max       uint64
+	Irq_delay_min       uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 590b56739c..9c38265c74 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -335,6 +335,22 @@ type Taskstats struct {
 	Wpcopy_delay_total  uint64
 	Irq_count           uint64
 	Irq_delay_total     uint64
+	Cpu_delay_max       uint64
+	Cpu_delay_min       uint64
+	Blkio_delay_max     uint64
+	Blkio_delay_min     uint64
+	Swapin_delay_max    uint64
+	Swapin_delay_min    uint64
+	Freepages_delay_max uint64
+	Freepages_delay_min uint64
+	Thrashing_delay_max uint64
+	Thrashing_delay_min uint64
+	Compact_delay_max   uint64
+	Compact_delay_min   uint64
+	Wpcopy_delay_max    uint64
+	Wpcopy_delay_min    uint64
+	Irq_delay_max       uint64
+	Irq_delay_min       uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go
index df6bf948e1..0ddd81c02a 100644
--- a/vendor/golang.org/x/term/term_windows.go
+++ b/vendor/golang.org/x/term/term_windows.go
@@ -20,12 +20,14 @@ func isTerminal(fd int) bool {
 	return err == nil
 }
 
+// This is intended to be used on a console input handle.
+// See https://learn.microsoft.com/en-us/windows/console/setconsolemode
 func makeRaw(fd int) (*State, error) {
 	var st uint32
 	if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
 		return nil, err
 	}
-	raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+	raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT)
 	raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
 	if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil {
 		return nil, err
diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go
index 13e9a64ad1..bddb2e2aeb 100644
--- a/vendor/golang.org/x/term/terminal.go
+++ b/vendor/golang.org/x/term/terminal.go
@@ -146,6 +146,7 @@ const (
 	keyCtrlD     = 4
 	keyCtrlU     = 21
 	keyEnter     = '\r'
+	keyLF        = '\n'
 	keyEscape    = 27
 	keyBackspace = 127
 	keyUnknown   = 0xd800 /* UTF-16 surrogate area */ + iota
@@ -497,7 +498,7 @@ func (t *Terminal) historyAdd(entry string) {
 // handleKey processes the given key and, optionally, returns a line of text
 // that the user has entered.
 func (t *Terminal) handleKey(key rune) (line string, ok bool) {
-	if t.pasteActive && key != keyEnter {
+	if t.pasteActive && key != keyEnter && key != keyLF {
 		t.addKeyToLine(key)
 		return
 	}
@@ -567,7 +568,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
 			t.setLine(runes, len(runes))
 		}
 	}
-	case keyEnter:
+	case keyEnter, keyLF:
 		t.moveCursorToPos(len(t.line))
 		t.queue([]rune("\r\n"))
 		line = string(t.line)
@@ -812,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) {
 			if !t.pasteActive {
 				lineIsPasted = false
 			}
+			// If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line.
+			if key == keyEnter && len(rest) > 0 && rest[0] == keyLF {
+				rest = rest[1:]
+			}
 			line, lineOk = t.handleKey(key)
 		}
 		if len(rest) > 0 {
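Editor's note on the terminal.go change above: treating LF as a line terminator and folding a CR+LF pair into one Enter press is easy to get wrong, so here is a minimal, self-contained sketch of the behavior (not part of the diff; splitLines and its input are illustrative stand-ins for the terminal's key loop):

```go
// Sketch: why readLine consumes an LF that immediately follows a CR.
// A console in raw mode can deliver "\r\n" for a single Enter press;
// without the extra check, the '\n' would be seen as a second line
// terminator and produce a spurious empty line.
package main

import "fmt"

// splitLines mimics the key loop's handling of a buffered input run.
func splitLines(keys []rune) []string {
	var lines []string
	var cur []rune
	for i := 0; i < len(keys); i++ {
		k := keys[i]
		if k == '\r' || k == '\n' {
			if k == '\r' && i+1 < len(keys) && keys[i+1] == '\n' {
				i++ // consume the LF of a CRLF pair
			}
			lines = append(lines, string(cur))
			cur = nil
			continue
		}
		cur = append(cur, k)
	}
	return lines
}

func main() {
	fmt.Printf("%q\n", splitLines([]rune("a\r\nb\n"))) // ["a" "b"], no empty line
}
```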
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
index 6e34df4613..0fb4e7eea8 100644
--- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
+++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
@@ -113,7 +113,7 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod
 			// childrenOf elides the FuncType node beneath FuncDecl.
 			// Add it back here for TypeParams, Params, Results,
 			// all FieldLists). But we don't add it back for the "func" token
-			// even though it is is the tree at FuncDecl.Type.Func.
+			// even though it is the tree at FuncDecl.Type.Func.
 			if decl, ok := node.(*ast.FuncDecl); ok {
 				if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv {
 					path = append(path, decl.Type)
@@ -207,6 +207,9 @@ func childrenOf(n ast.Node) []ast.Node {
 		return false // no recursion
 	})
 
+	// TODO(adonovan): be more careful about missing (!Pos.Valid)
+	// tokens in trees produced from invalid input.
+	// Then add fake Nodes for bare tokens.
 	switch n := n.(type) {
 	case *ast.ArrayType:
@@ -226,9 +229,12 @@ func childrenOf(n ast.Node) []ast.Node {
 		children = append(children,
 			tok(n.OpPos, len(n.Op.String())))
 
 	case *ast.BlockStmt:
-		children = append(children,
-			tok(n.Lbrace, len("{")),
-			tok(n.Rbrace, len("}")))
+		if n.Lbrace.IsValid() {
+			children = append(children, tok(n.Lbrace, len("{")))
+		}
+		if n.Rbrace.IsValid() {
+			children = append(children, tok(n.Rbrace, len("}")))
+		}
 
 	case *ast.BranchStmt:
 		children = append(children,
@@ -304,9 +310,12 @@ func childrenOf(n ast.Node) []ast.Node {
 		// TODO(adonovan): Field.{Doc,Comment,Tag}?
 
 	case *ast.FieldList:
-		children = append(children,
-			tok(n.Opening, len("(")), // or len("[")
-			tok(n.Closing, len(")"))) // or len("]")
+		if n.Opening.IsValid() {
+			children = append(children, tok(n.Opening, len("(")))
+		}
+		if n.Closing.IsValid() {
+			children = append(children, tok(n.Closing, len(")")))
+		}
 
 	case *ast.File:
 		// TODO test: Doc
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
index 5c8dbbb7a3..4ad0549304 100644
--- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
+++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
@@ -67,6 +67,10 @@ var abort = new(int) // singleton, to signal termination of Apply
 //
 // The methods Replace, Delete, InsertBefore, and InsertAfter
 // can be used to change the AST without disrupting Apply.
+//
+// This type is not to be confused with [inspector.Cursor] from
+// package [golang.org/x/tools/go/ast/inspector], which provides
+// stateless navigation of immutable syntax trees.
 type Cursor struct {
 	parent ast.Node
 	name   string
diff --git a/vendor/golang.org/x/tools/internal/astutil/edge/edge.go b/vendor/golang.org/x/tools/go/ast/edge/edge.go
similarity index 100%
rename from vendor/golang.org/x/tools/internal/astutil/edge/edge.go
rename to vendor/golang.org/x/tools/go/ast/edge/edge.go
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
new file mode 100644
index 0000000000..31c8d2f240
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
@@ -0,0 +1,502 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inspector
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"iter"
+	"reflect"
+
+	"golang.org/x/tools/go/ast/edge"
+)
+
+// A Cursor represents an [ast.Node]. It is immutable.
+//
+// Two Cursors compare equal if they represent the same node.
+//
+// Call [Inspector.Root] to obtain a valid cursor for the virtual root
+// node of the traversal.
+//
+// Use the following methods to navigate efficiently around the tree:
+//   - for ancestors, use [Cursor.Parent] and [Cursor.Enclosing];
+//   - for children, use [Cursor.Child], [Cursor.Children],
+//     [Cursor.FirstChild], and [Cursor.LastChild];
+//   - for siblings, use [Cursor.PrevSibling] and [Cursor.NextSibling];
+//   - for descendants, use [Cursor.FindByPos], [Cursor.FindNode],
+//     [Cursor.Inspect], and [Cursor.Preorder].
+//
+// Use the [Cursor.ChildAt] and [Cursor.ParentEdge] methods for
+// information about the edges in a tree: which field (and slice
+// element) of the parent node holds the child.
+type Cursor struct {
+	in    *Inspector
+	index int32 // index of push node; -1 for virtual root node
+}
+
+// Root returns a cursor for the virtual root node,
+// whose children are the files provided to [New].
+//
+// Its [Cursor.Node] and [Cursor.Stack] methods return nil.
+func (in *Inspector) Root() Cursor {
+	return Cursor{in, -1}
+}
+
+// At returns the cursor at the specified index in the traversal,
+// which must have been obtained from [Cursor.Index] on a Cursor
+// belonging to the same Inspector (see [Cursor.Inspector]).
+func (in *Inspector) At(index int32) Cursor {
+	if index < 0 {
+		panic("negative index")
+	}
+	if int(index) >= len(in.events) {
+		panic("index out of range for this inspector")
+	}
+	if in.events[index].index < index {
+		panic("invalid index") // (a push, not a pop)
+	}
+	return Cursor{in, index}
+}
+
+// Inspector returns the cursor's Inspector.
+func (c Cursor) Inspector() *Inspector { return c.in }
+
+// Index returns the index of this cursor position within the package.
+//
+// Clients should not assume anything about the numeric Index value
+// except that it increases monotonically throughout the traversal.
+// It is provided for use with [At].
+//
+// Index must not be called on the Root node.
+func (c Cursor) Index() int32 {
+	if c.index < 0 {
+		panic("Index called on Root node")
+	}
+	return c.index
+}
+
+// Node returns the node at the current cursor position,
+// or nil for the cursor returned by [Inspector.Root].
+func (c Cursor) Node() ast.Node {
+	if c.index < 0 {
+		return nil
+	}
+	return c.in.events[c.index].node
+}
+
+// String returns information about the cursor's node, if any.
+func (c Cursor) String() string {
+	if c.in == nil {
+		return "(invalid)"
+	}
+	if c.index < 0 {
+		return "(root)"
+	}
+	return reflect.TypeOf(c.Node()).String()
+}
+
+// indices return the [start, end) half-open interval of event indices.
+func (c Cursor) indices() (int32, int32) {
+	if c.index < 0 {
+		return 0, int32(len(c.in.events)) // root: all events
+	} else {
+		return c.index, c.in.events[c.index].index + 1 // just one subtree
+	}
+}
+
+// Preorder returns an iterator over the nodes of the subtree
+// represented by c in depth-first order. Each node in the sequence is
+// represented by a Cursor that allows access to the Node, but may
+// also be used to start a new traversal, or to obtain the stack of
+// nodes enclosing the cursor.
+//
+// The traversal sequence is determined by [ast.Inspect]. The types
+// argument, if non-empty, enables type-based filtering of events. The
+// function f is called only for nodes whose type matches an
+// element of the types slice.
+//
+// If you need control over descent into subtrees,
+// or need both pre- and post-order notifications, use [Cursor.Inspect].
+func (c Cursor) Preorder(types ...ast.Node) iter.Seq[Cursor] {
+	mask := maskOf(types)
+
+	return func(yield func(Cursor) bool) {
+		events := c.in.events
+
+		for i, limit := c.indices(); i < limit; {
+			ev := events[i]
+			if ev.index > i { // push?
+				if ev.typ&mask != 0 && !yield(Cursor{c.in, i}) {
+					break
+				}
+				pop := ev.index
+				if events[pop].typ&mask == 0 {
+					// Subtree does not contain types: skip.
+					i = pop + 1
+					continue
+				}
+			}
+			i++
+		}
+	}
+}
+
+// Inspect visits the nodes of the subtree represented by c in
+// depth-first order. It calls f(n) for each node n before it
+// visits n's children. If f returns true, Inspect invokes f
+// recursively for each of the non-nil children of the node.
+//
+// Each node is represented by a Cursor that allows access to the
+// Node, but may also be used to start a new traversal, or to obtain
+// the stack of nodes enclosing the cursor.
+//
+// The complete traversal sequence is determined by [ast.Inspect].
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+func (c Cursor) Inspect(types []ast.Node, f func(c Cursor) (descend bool)) {
+	mask := maskOf(types)
+	events := c.in.events
+	for i, limit := c.indices(); i < limit; {
+		ev := events[i]
+		if ev.index > i {
+			// push
+			pop := ev.index
+			if ev.typ&mask != 0 && !f(Cursor{c.in, i}) ||
+				events[pop].typ&mask == 0 {
+				// The user opted not to descend, or the
+				// subtree does not contain types:
+				// skip past the pop.
+				i = pop + 1
+				continue
+			}
+		}
+		i++
+	}
+}
+
+// Enclosing returns an iterator over the nodes enclosing the
+// current node, starting with the Cursor itself.
+//
+// Enclosing must not be called on the Root node (whose [Cursor.Node] returns nil).
+//
+// The types argument, if non-empty, enables type-based filtering of
+// events: the sequence includes only enclosing nodes whose type
+// matches an element of the types slice.
+func (c Cursor) Enclosing(types ...ast.Node) iter.Seq[Cursor] {
+	if c.index < 0 {
+		panic("Cursor.Enclosing called on Root node")
+	}
+
+	mask := maskOf(types)
+
+	return func(yield func(Cursor) bool) {
+		events := c.in.events
+		for i := c.index; i >= 0; i = events[i].parent {
+			if events[i].typ&mask != 0 && !yield(Cursor{c.in, i}) {
+				break
+			}
+		}
+	}
+}
+
+// Parent returns the parent of the current node.
+//
+// Parent must not be called on the Root node (whose [Cursor.Node] returns nil).
+func (c Cursor) Parent() Cursor {
+	if c.index < 0 {
+		panic("Cursor.Parent called on Root node")
+	}
+
+	return Cursor{c.in, c.in.events[c.index].parent}
+}
+
+// ParentEdge returns the identity of the field in the parent node
+// that holds this cursor's node, and if it is a list, the index within it.
+//
+// For example, f(x, y) is a CallExpr whose three children are Idents.
+// f has edge kind [edge.CallExpr_Fun] and index -1.
+// x and y have kind [edge.CallExpr_Args] and indices 0 and 1, respectively.
+//
+// If called on a child of the Root node, it returns ([edge.Invalid], -1).
+//
+// ParentEdge must not be called on the Root node (whose [Cursor.Node] returns nil).
+func (c Cursor) ParentEdge() (edge.Kind, int) {
+	if c.index < 0 {
+		panic("Cursor.ParentEdge called on Root node")
+	}
+	events := c.in.events
+	pop := events[c.index].index
+	return unpackEdgeKindAndIndex(events[pop].parent)
+}
+
+// ChildAt returns the cursor for the child of the
+// current node identified by its edge and index.
+// The index must be -1 if the edge.Kind is not a slice.
+// The indicated child node must exist.
+//
+// ChildAt must not be called on the Root node (whose [Cursor.Node] returns nil).
+//
+// Invariant: c.Parent().ChildAt(c.ParentEdge()) == c.
+func (c Cursor) ChildAt(k edge.Kind, idx int) Cursor {
+	target := packEdgeKindAndIndex(k, idx)
+
+	// Unfortunately there's no shortcut to looping.
+	events := c.in.events
+	i := c.index + 1
+	for {
+		pop := events[i].index
+		if pop < i {
+			break
+		}
+		if events[pop].parent == target {
+			return Cursor{c.in, i}
+		}
+		i = pop + 1
+	}
+	panic(fmt.Sprintf("ChildAt(%v, %d): no such child of %v", k, idx, c))
+}
+
+// Child returns the cursor for n, which must be a direct child of c's Node.
+//
+// Child must not be called on the Root node (whose [Cursor.Node] returns nil).
+func (c Cursor) Child(n ast.Node) Cursor {
+	if c.index < 0 {
+		panic("Cursor.Child called on Root node")
+	}
+
+	if false {
+		// reference implementation
+		for child := range c.Children() {
+			if child.Node() == n {
+				return child
+			}
+		}
+
+	} else {
+		// optimized implementation
+		events := c.in.events
+		for i := c.index + 1; events[i].index > i; i = events[i].index + 1 {
+			if events[i].node == n {
+				return Cursor{c.in, i}
+			}
+		}
+	}
+	panic(fmt.Sprintf("Child(%T): not a child of %v", n, c))
+}
+
+// NextSibling returns the cursor for the next sibling node in the same list
+// (for example, of files, decls, specs, statements, fields, or expressions) as
+// the current node. It returns (zero, false) if the node is the last node in
+// the list, or is not part of a list.
+//
+// NextSibling must not be called on the Root node.
+//
+// See note at [Cursor.Children].
+func (c Cursor) NextSibling() (Cursor, bool) {
+	if c.index < 0 {
+		panic("Cursor.NextSibling called on Root node")
+	}
+
+	events := c.in.events
+	i := events[c.index].index + 1 // after corresponding pop
+	if i < int32(len(events)) {
+		if events[i].index > i { // push?
+			return Cursor{c.in, i}, true
+		}
+	}
+	return Cursor{}, false
+}
+
+// PrevSibling returns the cursor for the previous sibling node in the
+// same list (for example, of files, decls, specs, statements, fields,
+// or expressions) as the current node. It returns zero if the node is
+// the first node in the list, or is not part of a list.
+//
+// It must not be called on the Root node.
+//
+// See note at [Cursor.Children].
+func (c Cursor) PrevSibling() (Cursor, bool) {
+	if c.index < 0 {
+		panic("Cursor.PrevSibling called on Root node")
+	}
+
+	events := c.in.events
+	i := c.index - 1
+	if i >= 0 {
+		if j := events[i].index; j < i { // pop?
+			return Cursor{c.in, j}, true
+		}
+	}
+	return Cursor{}, false
+}
+
+// FirstChild returns the first direct child of the current node,
+// or zero if it has no children.
+func (c Cursor) FirstChild() (Cursor, bool) {
+	events := c.in.events
+	i := c.index + 1 // i=0 if c is root
+	if i < int32(len(events)) && events[i].index > i { // push?
+		return Cursor{c.in, i}, true
+	}
+	return Cursor{}, false
+}
+
+// LastChild returns the last direct child of the current node,
+// or zero if it has no children.
+func (c Cursor) LastChild() (Cursor, bool) {
+	events := c.in.events
+	if c.index < 0 { // root?
+		if len(events) > 0 {
+			// return push of final event (a pop)
+			return Cursor{c.in, events[len(events)-1].index}, true
+		}
+	} else {
+		j := events[c.index].index - 1 // before corresponding pop
+		// Inv: j == c.index if c has no children
+		// or j is last child's pop.
+		if j > c.index { // c has children
+			return Cursor{c.in, events[j].index}, true
+		}
+	}
+	return Cursor{}, false
+}
+
+// Children returns an iterator over the direct children of the
+// current node, if any.
+//
+// When using Children, NextChild, and PrevChild, bear in mind that a
+// Node's children may come from different fields, some of which may
+// be lists of nodes without a distinguished intervening container
+// such as [ast.BlockStmt].
+//
+// For example, [ast.CaseClause] has a field List of expressions and a
+// field Body of statements, so the children of a CaseClause are a mix
+// of expressions and statements.
+// Other nodes that have "uncontained" list fields include:
+//
+//   - [ast.ValueSpec] (Names, Values)
+//   - [ast.CompositeLit] (Type, Elts)
+//   - [ast.IndexListExpr] (X, Indices)
+//   - [ast.CallExpr] (Fun, Args)
+//   - [ast.AssignStmt] (Lhs, Rhs)
+//
+// So, do not assume that the previous sibling of an ast.Stmt is also
+// an ast.Stmt, or if it is, that they are executed sequentially,
+// unless you have established that, say, its parent is a BlockStmt
+// or its [Cursor.ParentEdge] is [edge.BlockStmt_List].
+// For example, given "for S1; ; S2 {}", the predecessor of S2 is S1,
+// even though they are not executed in sequence.
+func (c Cursor) Children() iter.Seq[Cursor] {
+	return func(yield func(Cursor) bool) {
+		c, ok := c.FirstChild()
+		for ok && yield(c) {
+			c, ok = c.NextSibling()
+		}
+	}
+}
+
+// Contains reports whether c contains or is equal to c2.
+//
+// Both Cursors must belong to the same [Inspector];
+// neither may be its Root node.
+func (c Cursor) Contains(c2 Cursor) bool {
+	if c.in != c2.in {
+		panic("different inspectors")
+	}
+	events := c.in.events
+	return c.index <= c2.index && events[c2.index].index <= events[c.index].index
+}
+
+// FindNode returns the cursor for node n if it belongs to the subtree
+// rooted at c. It returns zero if n is not found.
+func (c Cursor) FindNode(n ast.Node) (Cursor, bool) {
+
+	// FindNode is equivalent to this code,
+	// but more convenient and 15-20% faster:
+	if false {
+		for candidate := range c.Preorder(n) {
+			if candidate.Node() == n {
+				return candidate, true
+			}
+		}
+		return Cursor{}, false
+	}
+
+	// TODO(adonovan): opt: should we assume Node.Pos is accurate
+	// and combine type-based filtering with position filtering
+	// like FindByPos?
+
+	mask := maskOf([]ast.Node{n})
+	events := c.in.events
+
+	for i, limit := c.indices(); i < limit; i++ {
+		ev := events[i]
+		if ev.index > i { // push?
+			if ev.typ&mask != 0 && ev.node == n {
+				return Cursor{c.in, i}, true
+			}
+			pop := ev.index
+			if events[pop].typ&mask == 0 {
+				// Subtree does not contain type of n: skip.
+				i = pop
+			}
+		}
+	}
+	return Cursor{}, false
+}
+
+// FindByPos returns the cursor for the innermost node n in the tree
+// rooted at c such that n.Pos() <= start && end <= n.End().
+// (For an *ast.File, it uses the bounds n.FileStart-n.FileEnd.)
+//
+// It returns zero if none is found.
+// Precondition: start <= end.
+//
+// See also [astutil.PathEnclosingInterval], which
+// tolerates adjoining whitespace.
+func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) {
+	if end < start {
+		panic("end < start")
+	}
+	events := c.in.events
+
+	// This algorithm could be implemented using c.Inspect,
+	// but it is about 2.5x slower.
+
+	best := int32(-1) // push index of latest (=innermost) node containing range
+	for i, limit := c.indices(); i < limit; i++ {
+		ev := events[i]
+		if ev.index > i { // push?
+			n := ev.node
+			var nodeEnd token.Pos
+			if file, ok := n.(*ast.File); ok {
+				nodeEnd = file.FileEnd
+				// Note: files may be out of Pos order.
+				if file.FileStart > start {
+					i = ev.index // disjoint, after; skip to next file
+					continue
+				}
+			} else {
+				nodeEnd = n.End()
+				if n.Pos() > start {
+					break // disjoint, after; stop
+				}
+			}
+			// Inv: node.{Pos,FileStart} <= start
+			if end <= nodeEnd {
+				// node fully contains target range
+				best = i
+			} else if nodeEnd < start {
+				i = ev.index // disjoint, before; skip forward
+			}
+		}
+	}
+	if best >= 0 {
+		return Cursor{c.in, best}, true
+	}
+	return Cursor{}, false
+}
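Editor's note on the new cursor.go above: the API is easiest to judge with a usage sketch. The following is illustrative only (not part of the diff); it exercises Root, Preorder, and Enclosing from the vendored package on a throwaway source string:

```go
// Sketch: two-direction navigation with the new Cursor API — walk
// down to every call expression, then walk up to its enclosing
// function declaration.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", "package p; func f() { g() }; func g() {}", 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f})

	// Downward: every call expression in the package.
	for call := range in.Root().Preorder((*ast.CallExpr)(nil)) {
		// Upward: the function declaration(s) enclosing that call.
		for enc := range call.Enclosing((*ast.FuncDecl)(nil)) {
			fmt.Println("call inside", enc.Node().(*ast.FuncDecl).Name.Name) // "call inside f"
		}
	}
}
```

This is exactly the nested, four-direction traversal the inspector.go doc comment below recommends over the older callback-only API.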
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
index 674490a65b..a703cdfcf9 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
@@ -13,10 +13,19 @@
 // This representation is sometimes called a "balanced parenthesis tree."
 //
 // Experiments suggest the inspector's traversals are about 2.5x faster
-// than ast.Inspect, but it may take around 5 traversals for this
+// than [ast.Inspect], but it may take around 5 traversals for this
 // benefit to amortize the inspector's construction cost.
 // If efficiency is the primary concern, do not use Inspector for
 // one-off traversals.
+//
+// The [Cursor] type provides a more flexible API for efficient
+// navigation of syntax trees in all four "cardinal directions". For
+// example, traversals may be nested, so you can find each node of
+// type A and then search within it for nodes of type B. Or you can
+// traverse from a node to its immediate neighbors: its parent, its
+// previous and next sibling, or its first and last child. We
+// recommend using methods of Cursor in preference to Inspector where
+// possible.
 package inspector
 
 // There are four orthogonal features in a traversal:
@@ -37,9 +46,8 @@ package inspector
 
 import (
 	"go/ast"
-	_ "unsafe"
 
-	"golang.org/x/tools/internal/astutil/edge"
+	"golang.org/x/tools/go/ast/edge"
 )
 
 // An Inspector provides methods for inspecting
@@ -48,18 +56,12 @@ type Inspector struct {
 	events []event
 }
 
-//go:linkname events golang.org/x/tools/go/ast/inspector.events
-func events(in *Inspector) []event { return in.events }
-
-//go:linkname packEdgeKindAndIndex golang.org/x/tools/go/ast/inspector.packEdgeKindAndIndex
 func packEdgeKindAndIndex(ek edge.Kind, index int) int32 {
 	return int32(uint32(index+1)<<7 | uint32(ek))
 }
 
 // unpackEdgeKindAndIndex unpacks the edge kind and edge index (within
 // an []ast.Node slice) from the parent field of a pop event.
-//
-//go:linkname unpackEdgeKindAndIndex golang.org/x/tools/go/ast/inspector.unpackEdgeKindAndIndex
 func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) {
 	// The "parent" field of a pop node holds the
 	// edge Kind in the lower 7 bits and the index+1
@@ -83,15 +85,21 @@ type event struct {
 
 // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer).
 // Type can be recovered from the sole bit in typ.
+// [Tried this, wasn't faster. --adonovan]
 
 // Preorder visits all the nodes of the files supplied to New in
 // depth-first order. It calls f(n) for each node n before it visits
 // n's children.
 //
-// The complete traversal sequence is determined by ast.Inspect.
+// The complete traversal sequence is determined by [ast.Inspect].
 // The types argument, if non-empty, enables type-based filtering of
 // events. The function f is called only for nodes whose type
 // matches an element of the types slice.
+//
+// The [Cursor.Preorder] method provides a richer alternative interface.
+// Example:
+//
+//	for c := range in.Root().Preorder(types) { ... }
 func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
 	// Because it avoids postorder calls to f, and the pruning
 	// check, Preorder is almost twice as fast as Nodes. The two
@@ -131,10 +139,18 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
 // of the non-nil children of the node, followed by a call of
 // f(n, false).
 //
-// The complete traversal sequence is determined by ast.Inspect.
+// The complete traversal sequence is determined by [ast.Inspect].
 // The types argument, if non-empty, enables type-based filtering of
 // events. The function f is called only for nodes whose type
 // matches an element of the types slice.
+//
+// The [Cursor.Inspect] method provides a richer alternative interface.
+// Example:
+//
+//	in.Root().Inspect(types, func(c Cursor) bool {
+//		...
+//		return true
+//	})
 func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
 	mask := maskOf(types)
 	for i := int32(0); i < int32(len(in.events)); {
@@ -168,6 +184,15 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc
 // supplies each call to f an additional argument, the current
 // traversal stack. The stack's first element is the outermost node,
 // an *ast.File; its last is the innermost, n.
+//
+// The [Cursor.Inspect] method provides a richer alternative interface.
+// Example:
+//
+//	in.Root().Inspect(types, func(c Cursor) bool {
+//		stack := slices.Collect(c.Enclosing())
+//		...
+//		return true
+//	})
 func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
 	mask := maskOf(types)
 	var stack []ast.Node
@@ -233,7 +258,7 @@ type visitor struct {
 type item struct {
 	index            int32  // index of current node's push event
 	parentIndex      int32  // index of parent node's push event
-	typAccum         uint64 // accumulated type bits of current node's descendents
+	typAccum         uint64 // accumulated type bits of current node's descendants
 	edgeKindAndIndex int32  // edge.Kind and index, bit packed
 }
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
index e936c67c98..9852331a3d 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -12,8 +12,6 @@ package inspector
 import (
 	"go/ast"
 	"math"
-
-	_ "unsafe"
 )
 
 const (
@@ -217,7 +215,6 @@ func typeOf(n ast.Node) uint64 {
 	return 0
 }
 
-//go:linkname maskOf golang.org/x/tools/go/ast/inspector.maskOf
 func maskOf(nodes []ast.Node) uint64 {
 	if len(nodes) == 0 {
 		return math.MaxUint64 // match all node types
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/vendor/golang.org/x/tools/go/ast/inspector/walk.go
index 5a42174a0a..5f1c93c8a7 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/walk.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/walk.go
@@ -13,7 +13,7 @@ import (
 	"fmt"
 	"go/ast"
 
-	"golang.org/x/tools/internal/astutil/edge"
+	"golang.org/x/tools/go/ast/edge"
 )
 
 func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) {
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
index f1931d10ee..366aab6b2c 100644
--- a/vendor/golang.org/x/tools/go/packages/doc.go
+++ b/vendor/golang.org/x/tools/go/packages/doc.go
@@ -76,6 +76,8 @@ uninterpreted to Load, so that it can interpret them according to the
 conventions of the underlying build system.
 See the Example function for typical usage.
+See also [golang.org/x/tools/go/packages/internal/linecount]
+for an example application.
 
 # The driver protocol
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index 96e43cd809..89f89dd2dc 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -224,13 +224,22 @@ extractQueries:
 	return response.dr, nil
 }
 
+// abs returns an absolute representation of path, based on cfg.Dir.
+func (cfg *Config) abs(path string) (string, error) {
+	if filepath.IsAbs(path) {
+		return path, nil
+	}
+	// In case cfg.Dir is relative, pass it to filepath.Abs.
+	return filepath.Abs(filepath.Join(cfg.Dir, path))
+}
+
 func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error {
 	for _, query := range queries {
 		// TODO(matloob): Do only one query per directory.
 		fdir := filepath.Dir(query)
 		// Pass absolute path of directory to go list so that it knows to treat it as a directory,
 		// not a package path.
-		pattern, err := filepath.Abs(fdir)
+		pattern, err := state.cfg.abs(fdir)
 		if err != nil {
 			return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
 		}
@@ -703,9 +712,8 @@ func (state *golistState) getGoVersion() (int, error) {
 // getPkgPath finds the package path of a directory if it's relative to a root
 // directory.
 func (state *golistState) getPkgPath(dir string) (string, bool, error) {
-	absDir, err := filepath.Abs(dir)
-	if err != nil {
-		return "", false, err
+	if !filepath.IsAbs(dir) {
+		panic("non-absolute dir passed to getPkgPath")
 	}
 	roots, err := state.determineRootDirs()
 	if err != nil {
@@ -715,7 +723,7 @@ func (state *golistState) getPkgPath(dir string) (string, bool, error) {
 	for rdir, rpath := range roots {
 		// Make sure that the directory is in the module,
 		// to avoid creating a path relative to another module.
-		if !strings.HasPrefix(absDir, rdir) {
+		if !strings.HasPrefix(dir, rdir) {
 			continue
 		}
 		// TODO(matloob): This doesn't properly handle symlinks.
diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
index d823c474ad..d9d5a45cd4 100644
--- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go
+++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
@@ -55,7 +55,7 @@ func (state *golistState) determineRootDirsModules() (map[string]string, error)
 		}
 		if mod.Dir != "" && mod.Path != "" {
 			// This is a valid module; add it to the map.
-			absDir, err := filepath.Abs(mod.Dir)
+			absDir, err := state.cfg.abs(mod.Dir)
 			if err != nil {
 				return nil, err
 			}
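Editor's note on the go/packages changes above: both call sites switch from bare filepath.Abs to the new Config.abs helper, so relative paths now resolve against Config.Dir rather than the process working directory. A standalone sketch of that behavior (illustrative only; abs is written here as a free function instead of the Config method):

```go
// Sketch: resolving a relative path against a configured base
// directory, as the new cfg.abs helper does.
package main

import (
	"fmt"
	"path/filepath"
)

// abs mirrors the helper added to golist.go, with cfg.Dir inlined
// as a parameter.
func abs(cfgDir, path string) (string, error) {
	if filepath.IsAbs(path) {
		return path, nil
	}
	// cfgDir itself may be relative, so run the join through Abs.
	return filepath.Abs(filepath.Join(cfgDir, path))
}

func main() {
	got, _ := abs("/work/module", "./pkg")
	fmt.Println(got) // /work/module/pkg (on Unix)
}
```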
diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
index 16ed3c1780..d3c2913bef 100644
--- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -603,7 +603,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
 	type hasTypeParams interface {
 		TypeParams() *types.TypeParamList
 	}
-	// abstraction of *types.{Named,TypeParam}
+	// abstraction of *types.{Alias,Named,TypeParam}
 	type hasObj interface {
 		Obj() *types.TypeName
 	}
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index 89b96381cd..50b6ca51a6 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -27,12 +27,13 @@ import (
 	"unicode"
 	"unicode/utf8"
 
+	"maps"
+
 	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/gopathwalk"
 	"golang.org/x/tools/internal/stdlib"
-	"maps"
 )
 
 // importToGroup is a list of functions which map from an import path to
@@ -290,8 +291,8 @@ func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) erro
 	return nil
 }
 
-// if there is a trailing major version, remove it
-func withoutVersion(nm string) string {
+// WithoutVersion removes a trailing major version, if there is one.
+func WithoutVersion(nm string) string {
 	if v := path.Base(nm); len(v) > 0 && v[0] == 'v' {
 		if _, err := strconv.Atoi(v[1:]); err == nil {
 			// this is, for instance, called with rand/v2 and returns rand
@@ -313,7 +314,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
 	}
 	known := p.knownPackages[imp.ImportPath]
 	if known != nil && known.Name != "" {
-		return withoutVersion(known.Name)
+		return WithoutVersion(known.Name)
 	}
 	return ImportPathToAssumedName(imp.ImportPath)
 }
diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go
index 2215a12880..b5f5218b5c 100644
--- a/vendor/golang.org/x/tools/internal/imports/imports.go
+++ b/vendor/golang.org/x/tools/internal/imports/imports.go
@@ -93,7 +93,7 @@ func FixImports(ctx context.Context, filename string, src []byte, goroot string,
 // env is needed.
 func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) {
 	// Don't use parse() -- we don't care about fragments or statement lists
-	// here, and we need to work with unparseable files.
+	// here, and we need to work with unparsable files.
 	fileSet := token.NewFileSet()
 	parserMode := parser.SkipObjectResolution
 	if opt.Comments {
diff --git a/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/vendor/golang.org/x/tools/internal/imports/source_modindex.go
index 05229f06ce..ca745d4a1b 100644
--- a/vendor/golang.org/x/tools/internal/imports/source_modindex.go
+++ b/vendor/golang.org/x/tools/internal/imports/source_modindex.go
@@ -15,6 +15,10 @@ import (
 // This code is here rather than in the modindex package
 // to avoid import loops
 
+// TODO(adonovan): this code is only used by a test in this package.
+// Can we delete it? Or is there a plan to call NewIndexSource from
+// cmd/goimports?
+
 // implements Source using modindex, so only for module cache.
 //
 // this is perhaps over-engineered. A new Index is read at first use.
@@ -22,8 +26,8 @@ import (
 // is read if the index changed. It is not clear the Mutex is needed.
 type IndexSource struct {
 	modcachedir string
-	mutex       sync.Mutex
-	ix          *modindex.Index
+	mu          sync.Mutex
+	index       *modindex.Index // (access via getIndex)
 	expires     time.Time
 }
@@ -39,13 +43,14 @@ func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths
 }
 
 func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
-	if err := s.maybeReadIndex(); err != nil {
+	index, err := s.getIndex()
+	if err != nil {
 		return nil, err
 	}
 	var cs []modindex.Candidate
 	for pkg, nms := range missing {
 		for nm := range nms {
-			x := s.ix.Lookup(pkg, nm, false)
+			x := index.Lookup(pkg, nm, false)
 			cs = append(cs, x...)
 		}
 	}
@@ -74,30 +79,22 @@ func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, mi
 	return ans, nil
 }
 
-func (s *IndexSource) maybeReadIndex() error {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-
-	var readIndex bool
-	if time.Now().After(s.expires) {
-		ok, err := modindex.Update(s.modcachedir)
-		if err != nil {
-			return err
-		}
-		if ok {
-			readIndex = true
-		}
-	}
+func (s *IndexSource) getIndex() (*modindex.Index, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
 
-	if readIndex || s.ix == nil {
-		ix, err := modindex.ReadIndex(s.modcachedir)
+	// (s.index = nil => s.expires is zero,
+	// so the first condition is strictly redundant.
+	// But it makes the postcondition very clear.)
+	if s.index == nil || time.Now().After(s.expires) {
+		index, err := modindex.Update(s.modcachedir)
 		if err != nil {
-			return err
+			return nil, err
 		}
-		s.ix = ix
-		// for now refresh every 15 minutes
-		s.expires = time.Now().Add(time.Minute * 15)
+		s.index = index
+		s.expires = index.ValidAt.Add(15 * time.Minute) // (refresh period)
 	}
+	// Inv: s.index != nil
 
-	return nil
+	return s.index, nil
 }
diff --git a/vendor/golang.org/x/tools/internal/modindex/directories.go b/vendor/golang.org/x/tools/internal/modindex/directories.go
index 1e1a02f239..9a963744b5 100644
--- a/vendor/golang.org/x/tools/internal/modindex/directories.go
+++ b/vendor/golang.org/x/tools/internal/modindex/directories.go
@@ -10,7 +10,6 @@ import (
 	"os"
 	"path/filepath"
 	"regexp"
-	"slices"
 	"strings"
 	"sync"
 	"time"
@@ -20,50 +19,48 @@ import (
 )
 
 type directory struct {
-	path       Relpath
+	path       string // relative to GOMODCACHE
 	importPath string
 	version    string // semantic version
-	syms       []symbol
 }
 
-// filterDirs groups the directories by import path,
-// sorting the ones with the same import path by semantic version,
-// most recent first.
-func byImportPath(dirs []Relpath) (map[string][]*directory, error) {
-	ans := make(map[string][]*directory) // key is import path
-	for _, d := range dirs {
-		ip, sv, err := DirToImportPathVersion(d)
+// bestDirByImportPath returns the best directory for each import
+// path, where "best" means most recent semantic version. These import
+// paths are inferred from the GOMODCACHE-relative dir names in dirs.
+func bestDirByImportPath(dirs []string) (map[string]directory, error) {
+	dirsByPath := make(map[string]directory)
+	for _, dir := range dirs {
+		importPath, version, err := dirToImportPathVersion(dir)
 		if err != nil {
 			return nil, err
 		}
-		ans[ip] = append(ans[ip], &directory{
-			path:       d,
-			importPath: ip,
-			version:    sv,
-		})
-	}
-	for k, v := range ans {
-		semanticSort(v)
-		ans[k] = v
+		new := directory{
+			path:       dir,
+			importPath: importPath,
+			version:    version,
+		}
+		if old, ok := dirsByPath[importPath]; !ok || compareDirectory(new, old) < 0 {
+			dirsByPath[importPath] = new
+		}
 	}
-	return ans, nil
+	return dirsByPath, nil
 }
 
-// sort the directories by semantic version, latest first
-func semanticSort(v []*directory) {
-	slices.SortFunc(v, func(l, r *directory) int {
-		if n := semver.Compare(l.version, r.version); n != 0 {
-			return -n // latest first
-		}
-		return strings.Compare(string(l.path), string(r.path))
-	})
+// compareDirectory defines an ordering of path@version directories,
+// by descending version, then by ascending path.
+func compareDirectory(x, y directory) int {
+	if sign := -semver.Compare(x.version, y.version); sign != 0 {
+		return sign // latest first
+	}
+	return strings.Compare(string(x.path), string(y.path))
+}
 
 // modCacheRegexp splits a relpathpath into module, module version, and package.
 var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
 
-// DirToImportPathVersion computes import path and semantic version
-func DirToImportPathVersion(dir Relpath) (string, string, error) {
+// dirToImportPathVersion computes import path and semantic version
+// from a GOMODCACHE-relative directory name.
+func dirToImportPathVersion(dir string) (string, string, error) {
 	m := modCacheRegexp.FindStringSubmatch(string(dir))
 	// m[1] is the module path
 	// m[2] is the version major.minor.patch(-<pre release identifier)
diff --git a/vendor/golang.org/x/tools/internal/modindex/modindex.go b/vendor/golang.org/x/tools/internal/modindex/modindex.go
--- a/vendor/golang.org/x/tools/internal/modindex/modindex.go
+++ b/vendor/golang.org/x/tools/internal/modindex/modindex.go
-// The directory containing the index, returned by
-// [IndexDir], contains a file index-name-<ver> that contains the name
+// Package modindex contains code for building and searching an
+// [Index] of the Go module cache.
+package modindex
+
+// The directory containing the index, returned by
+// [IndexDir], contains a file index-name-<ver> that contains the name
 // of the current index. We believe writing that short file is atomic.
-// ReadIndex reads that file to get the file name of the index.
+// [Read] reads that file to get the file name of the index.
 // WriteIndex writes an index with a unique name and then
 // writes that name into a new version of index-name-<ver>.
 // (<ver> stands for the CurrentVersion of the index format.)
-package modindex
 
 import (
+	"maps"
+	"os"
 	"path/filepath"
 	"slices"
 	"strings"
@@ -21,144 +25,95 @@ import (
 	"golang.org/x/mod/semver"
 )
 
-// Create always creates a new index for the go module cache that is in cachedir.
-func Create(cachedir string) error {
-	_, err := indexModCache(cachedir, true)
-	return err
-}
-
-// Update the index for the go module cache that is in cachedir,
-// If there is no existing index it will build one.
-// If there are changed directories since the last index, it will
-// write a new one and return true. Otherwise it returns false.
-func Update(cachedir string) (bool, error) {
-	return indexModCache(cachedir, false)
+// Update updates the index for the specified Go
+// module cache directory, creating it as needed.
+// On success it returns the current index.
+func Update(gomodcache string) (*Index, error) {
+	prev, err := Read(gomodcache)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return nil, err
+		}
+		prev = nil
+	}
+	return update(gomodcache, prev)
 }
 
-// indexModCache writes an index current as of when it is called.
-// If clear is true the index is constructed from all of GOMODCACHE
-// otherwise the index is constructed from the last previous index
-// and the updates to the cache. It returns true if it wrote an index,
-// false otherwise.
-func indexModCache(cachedir string, clear bool) (bool, error) {
-	cachedir, err := filepath.Abs(cachedir)
+// update builds, writes, and returns the current index.
+//
+// If old is nil, the new index is built from all of GOMODCACHE;
+// otherwise it is built from the old index plus cache updates
+// since the previous index's time.
+func update(gomodcache string, old *Index) (*Index, error) {
+	gomodcache, err := filepath.Abs(gomodcache)
 	if err != nil {
-		return false, err
+		return nil, err
 	}
-	cd := Abspath(cachedir)
-	future := time.Now().Add(24 * time.Hour) // safely in the future
-	ok, err := modindexTimed(future, cd, clear)
+	new, changed, err := build(gomodcache, old)
 	if err != nil {
-		return false, err
+		return nil, err
 	}
-	return ok, nil
-}
-
-// modindexTimed writes an index current as of onlyBefore.
-// If clear is true the index is constructed from all of GOMODCACHE
-// otherwise the index is constructed from the last previous index
-// and all the updates to the cache before onlyBefore.
-// It returns true if it wrote a new index, false if it wrote nothing.
-func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
-	var curIndex *Index
-	if !clear {
-		var err error
-		curIndex, err = ReadIndex(string(cachedir))
-		if clear && err != nil {
-			return false, err
+	if old == nil || changed {
+		if err := write(gomodcache, new); err != nil {
+			return nil, err
 		}
-		// TODO(pjw): check that most of those directories still exist
-	}
-	cfg := &work{
-		onlyBefore: onlyBefore,
-		oldIndex:   curIndex,
-		cacheDir:   cachedir,
-	}
-	if curIndex != nil {
-		cfg.onlyAfter = curIndex.Changed
-	}
-	if err := cfg.buildIndex(); err != nil {
-		return false, err
 	}
-	if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
-		// no changes from existing curIndex, don't write a new index
-		return false, nil
-	}
-	if err := cfg.writeIndex(); err != nil {
-		return false, err
-	}
-	return true, nil
-}
-
-type work struct {
-	onlyBefore time.Time // do not use directories later than this
-	onlyAfter  time.Time // only interested in directories after this
-	// directories from before onlyAfter come from oldIndex
-	oldIndex *Index
-	newIndex *Index
-	cacheDir Abspath
+	return new, nil
 }
 
-func (w *work) buildIndex() error {
-	// The effective date of the new index should be at least
-	// slightly earlier than when the directories are scanned
-	// so set it now.
-	w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
-	dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
-	if len(dirs) == 0 {
-		return nil
+// build returns a new index for the specified Go module cache (an
+// absolute path).
+//
+// If an old index is provided, only directories more recent than
+// it are scanned; older directories are provided by the old
+// Index.
+//
+// The boolean result indicates whether new entries were found.
+func build(gomodcache string, old *Index) (*Index, bool, error) {
+	// Set the time window.
+	var start time.Time // = dawn of time
+	if old != nil {
+		start = old.ValidAt
 	}
-	newdirs, err := byImportPath(dirs)
+	now := time.Now()
+	end := now.Add(24 * time.Hour) // safely in the future
+
+	// Enumerate GOMODCACHE package directories.
+	// Choose the best (latest) package for each import path.
+	pkgDirs := findDirs(gomodcache, start, end)
+	dirByPath, err := bestDirByImportPath(pkgDirs)
 	if err != nil {
-		return err
+		return nil, false, err
 	}
-	// for each import path it might occur only in newdirs,
-	// only in w.oldIndex, or in both.
-	// If it occurs in both, use the semantically later one
-	if w.oldIndex != nil {
-		for _, e := range w.oldIndex.Entries {
-			found, ok := newdirs[e.ImportPath]
-			if !ok {
-				w.newIndex.Entries = append(w.newIndex.Entries, e)
-				continue // use this one, there is no new one
-			}
-			if semver.Compare(found[0].version, e.Version) > 0 {
-				// use the new one
-			} else {
-				// use the old one, forget the new one
-				w.newIndex.Entries = append(w.newIndex.Entries, e)
-				delete(newdirs, e.ImportPath)
+
+	// For each import path it might occur only in
+	// dirByPath, only in old, or in both.
+	// If both, use the semantically later one.
+	var entries []Entry
+	if old != nil {
+		for _, entry := range old.Entries {
+			dir, ok := dirByPath[entry.ImportPath]
+			if !ok || semver.Compare(dir.version, entry.Version) <= 0 {
+				// New dir is missing or not more recent; use old entry.
+				entries = append(entries, entry)
+				delete(dirByPath, entry.ImportPath)
 			}
 		}
 	}
-	// get symbol information for all the new diredtories
-	getSymbols(w.cacheDir, newdirs)
-	// assemble the new index entries
-	for k, v := range newdirs {
-		d := v[0]
-		pkg, names := processSyms(d.syms)
-		if pkg == "" {
-			continue // PJW: does this ever happen?
-		}
-		entry := Entry{
-			PkgName:    pkg,
-			Dir:        d.path,
-			ImportPath: k,
-			Version:    d.version,
-			Names:      names,
-		}
-		w.newIndex.Entries = append(w.newIndex.Entries, entry)
-	}
-	// sort the entries in the new index
-	slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
-		if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
+
+	// Extract symbol information for all the new directories.
+	newEntries := extractSymbols(gomodcache, maps.Values(dirByPath))
+	entries = append(entries, newEntries...)
+	slices.SortFunc(entries, func(x, y Entry) int {
+		if n := strings.Compare(x.PkgName, y.PkgName); n != 0 {
 			return n
 		}
-		return strings.Compare(l.ImportPath, r.ImportPath)
+		return strings.Compare(x.ImportPath, y.ImportPath)
 	})
-	return nil
-}
 
-func (w *work) writeIndex() error {
-	return writeIndex(w.cacheDir, w.newIndex)
+	return &Index{
+		GOMODCACHE: gomodcache,
+		ValidAt:    now, // time before the directories were scanned
+		Entries:    entries,
+	}, len(newEntries) > 0, nil
 }
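Editor's note on the new build function above: the merge rule is the heart of the incremental update — an old entry survives unless a rescanned directory carries a strictly newer semantic version for the same import path. A self-contained sketch of that rule (not part of the diff; entry and mergeEntries are illustrative names, while semver is the real golang.org/x/mod/semver package):

```go
// Sketch: merging an old index's entries with freshly rescanned
// directories, keeping the old entry unless the rescan is strictly
// newer by semantic version.
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

type entry struct{ importPath, version string }

func mergeEntries(old []entry, newDirs map[string]string) []entry {
	var out []entry
	for _, e := range old {
		v, ok := newDirs[e.importPath]
		if !ok || semver.Compare(v, e.version) <= 0 {
			out = append(out, e)          // keep the old entry
			delete(newDirs, e.importPath) // drop the stale rescan
		}
	}
	for ip, v := range newDirs { // remaining dirs are genuinely newer
		out = append(out, entry{ip, v})
	}
	return out
}

func main() {
	old := []entry{{"example.com/m", "v1.2.0"}}
	// The rescanned v1.1.0 loses to the indexed v1.2.0.
	fmt.Println(mergeEntries(old, map[string]string{"example.com/m": "v1.1.0"}))
}
```

Note that build reports "changed" only when newEntries is non-empty, which is what lets update skip rewriting the index file on a no-op scan.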
diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
index b918529d43..fe24db9b13 100644
--- a/vendor/golang.org/x/tools/internal/modindex/symbols.go
+++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go
@@ -10,11 +10,13 @@ import (
 	"go/parser"
 	"go/token"
 	"go/types"
+	"iter"
 	"os"
 	"path/filepath"
 	"runtime"
 	"slices"
 	"strings"
+	"sync"
 
 	"golang.org/x/sync/errgroup"
 )
@@ -30,45 +32,69 @@ import (
 type symbol struct {
 	pkg  string // name of the symbols's package
 	name string // declared name
-	kind string // T, C, V, or F, follwed by D if deprecated
+	kind string // T, C, V, or F, followed by D if deprecated
 	sig  string // signature information, for F
 }
 
-// find the symbols for the best directories
-func getSymbols(cd Abspath, dirs map[string][]*directory) {
+// extractSymbols returns a (new, unordered) array of Entries, one for
+// each provided package directory, describing its exported symbols.
+func extractSymbols(cwd string, dirs iter.Seq[directory]) []Entry {
+	var (
+		mu      sync.Mutex
+		entries []Entry
+	)
+
 	var g errgroup.Group
 	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
-	for _, vv := range dirs {
-		// throttling some day?
-		d := vv[0]
+	for dir := range dirs {
 		g.Go(func() error {
-			thedir := filepath.Join(string(cd), string(d.path))
+			thedir := filepath.Join(cwd, string(dir.path))
 			mode := parser.SkipObjectResolution | parser.ParseComments
 
-			fi, err := os.ReadDir(thedir)
+			// Parse all Go files in dir and extract symbols.
+			dirents, err := os.ReadDir(thedir)
 			if err != nil {
 				return nil // log this someday?
 			}
-			for _, fx := range fi {
-				if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
+			var syms []symbol
+			for _, dirent := range dirents {
+				if !strings.HasSuffix(dirent.Name(), ".go") ||
+					strings.HasSuffix(dirent.Name(), "_test.go") {
 					continue
 				}
-				fname := filepath.Join(thedir, fx.Name())
+				fname := filepath.Join(thedir, dirent.Name())
 				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
 				if err != nil {
 					continue // ignore errors, someday log them?
 				}
-				d.syms = append(d.syms, getFileExports(tr)...)
+				syms = append(syms, getFileExports(tr)...)
+			}
+
+			// Create an entry for the package.
+			pkg, names := processSyms(syms)
+			if pkg != "" {
+				mu.Lock()
+				defer mu.Unlock()
+				entries = append(entries, Entry{
+					PkgName:    pkg,
+					Dir:        dir.path,
+					ImportPath: dir.importPath,
+					Version:    dir.version,
+					Names:      names,
+				})
 			}
+
 			return nil
 		})
 	}
-	g.Wait()
+	g.Wait() // ignore error
+
+	return entries
 }
 
 func getFileExports(f *ast.File) []symbol {
 	pkg := f.Name.Name
-	if pkg == "main" {
+	if pkg == "main" || pkg == "" {
 		return nil
 	}
 	var ans []symbol
@@ -110,7 +136,7 @@ func getFileExports(f *ast.File) []symbol {
 				// The only place a $ can occur seems to be in a struct tag, which
 				// can be an arbitrary string literal, and ExprString does not presently
 				// print struct tags. So for this to happen the type of a formal parameter
-				// has to be a explict struct, e.g. foo(x struct{a int "$"}) and ExprString
+				// has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
 				// would have to show the struct tag. Even testing for this case seems
 				// a waste of effort, but let's remember the possibility
 				if strings.Contains(tp, "$") {
@@ -202,17 +228,18 @@ func processSyms(syms []symbol) (string, []string) {
 	pkg := syms[0].pkg
 	var names []string
 	for _, s := range syms {
+		if s.pkg != pkg {
+			// Symbols came from two files in same dir
+			// with different package declarations.
+			continue
+		}
 		var nx string
-		if s.pkg == pkg {
-			if s.sig != "" {
-				nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
-			} else {
-				nx = fmt.Sprintf("%s %s", s.name, s.kind)
-			}
-			names = append(names, nx)
+		if s.sig != "" {
+			nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
 		} else {
-			continue // PJW: do we want to keep track of these?
+			nx = fmt.Sprintf("%s %s", s.name, s.kind)
 		}
+		names = append(names, nx)
 	}
 	return pkg, names
 }
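Editor's note on the symbols.go rewrite above: extractSymbols now uses a bounded errgroup with a mutex-guarded result slice, replacing the old per-directory syms field. A minimal sketch of that fan-out shape (illustrative only; collect and processDir stand in for the real per-directory parsing, and the Go 1.21+ max builtin and per-iteration loop variables are assumed, as in the diff):

```go
// Sketch: bounded parallel fan-out with a mutex-guarded result
// slice, mirroring the concurrency shape of extractSymbols.
package main

import (
	"fmt"
	"runtime"
	"sync"

	"golang.org/x/sync/errgroup"
)

func processDir(dir string) string { return "entry for " + dir } // placeholder work

func collect(dirs []string) []string {
	var (
		mu      sync.Mutex
		entries []string
	)
	var g errgroup.Group
	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2)) // same throttle as the diff
	for _, dir := range dirs {
		g.Go(func() error {
			e := processDir(dir) // parse files, extract symbols
			mu.Lock()
			defer mu.Unlock()
			entries = append(entries, e)
			return nil
		})
	}
	g.Wait() // workers only return nil, so the error is deliberately ignored
	return entries
}

func main() { fmt.Println(collect([]string{"a", "b"})) }
```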
diff --git a/vendor/golang.org/x/tools/internal/modindex/types.go b/vendor/golang.org/x/tools/internal/modindex/types.go
deleted file mode 100644
index ece4488630..0000000000
--- a/vendor/golang.org/x/tools/internal/modindex/types.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modindex
-
-import (
-	"strings"
-)
-
-// some special types to avoid confusions
-
-// distinguish various types of directory names. It's easy to get confused.
-type Abspath string // absolute paths
-type Relpath string // paths with GOMODCACHE prefix removed
-
-func toRelpath(cachedir Abspath, s string) Relpath {
-	if strings.HasPrefix(s, string(cachedir)) {
-		if s == string(cachedir) {
-			return Relpath("")
-		}
-		return Relpath(s[len(cachedir)+1:])
-	}
-	return Relpath(s)
-}
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index 73eefa2a7d..929b470beb 100644
--- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -5,6 +5,8 @@
 // Package packagesinternal exposes internal-only fields from go/packages.
 package packagesinternal
 
+import "fmt"
+
 var GetDepsErrors = func(p any) []*PackageError { return nil }
 
 type PackageError struct {
@@ -13,5 +15,9 @@ type PackageError struct {
 	Err         string   // the error itself
 }
 
+func (err PackageError) String() string {
+	return fmt.Sprintf("%s: %s (import stack: %s)", err.Pos, err.Err, err.ImportStack)
+}
+
 var TypecheckCgo int
 var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
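
Because packagesinternal cannot be imported from outside the x/tools module, the new String formatting is shown here on a local copy of the type, purely for illustration:

    package main

    import "fmt"

    // PackageError mirrors packagesinternal.PackageError for this example.
    type PackageError struct {
        ImportStack []string
        Pos         string
        Err         string
    }

    func (err PackageError) String() string {
        return fmt.Sprintf("%s: %s (import stack: %s)", err.Pos, err.Err, err.ImportStack)
    }

    func main() {
        e := PackageError{
            ImportStack: []string{"a", "a/b"},
            Pos:         "b.go:3:9",
            Err:         "undefined: x",
        }
        fmt.Println(e) // b.go:3:9: undefined: x (import stack: [a a/b])
    }
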
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
index 649c82b6be..3db2a135b9 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
@@ -65,14 +65,16 @@ func ClassifyCall(info *types.Info, call *ast.CallExpr) CallKind {
 	if info.Types == nil {
 		panic("ClassifyCall: info.Types is nil")
 	}
-	if info.Types[call.Fun].IsType() {
+	tv := info.Types[call.Fun]
+	if tv.IsType() {
 		return CallConversion
 	}
+	if tv.IsBuiltin() {
+		return CallBuiltin
+	}
 	obj := info.Uses[UsedIdent(info, call.Fun)]
 	// Classify the call by the type of the object, if any.
 	switch obj := obj.(type) {
-	case *types.Builtin:
-		return CallBuiltin
 	case *types.Func:
 		if interfaceMethod(obj) {
 			return CallInterface
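
The builtin check now happens before the info.Uses lookup, directly on the recorded TypeAndValue. A standalone sketch of that check order against go/types (without calling the internal ClassifyCall itself):

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
        "go/types"
    )

    func main() {
        src := `package p
    var n = len("hi")
    var f = float64(n)
    `
        fset := token.NewFileSet()
        file, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }
        info := &types.Info{Types: map[ast.Expr]types.TypeAndValue{}}
        if _, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, info); err != nil {
            panic(err)
        }
        ast.Inspect(file, func(n ast.Node) bool {
            if call, ok := n.(*ast.CallExpr); ok {
                tv := info.Types[call.Fun]
                switch {
                case tv.IsType():
                    fmt.Println("conversion:", types.ExprString(call.Fun)) // float64
                case tv.IsBuiltin():
                    fmt.Println("builtin:", types.ExprString(call.Fun)) // len
                }
            }
            return true
        })
    }
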
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index cc244689ef..a5cd7e8dbf 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -69,6 +69,34 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
 	}
 }
 
+// TypeNameFor returns the type name symbol for the specified type, if
+// it is a [*types.Alias], [*types.Named], [*types.TypeParam], or a
+// [*types.Basic] representing a type.
+//
+// For all other types, and for Basic types representing a builtin,
+// constant, or nil, it returns nil. Be careful not to convert the
+// resulting nil pointer to a [types.Object]!
+//
+// If t is the type of a constant, it may be an "untyped" type, which
+// has no TypeName. To access the name of such types (e.g. "untyped
+// int"), use [types.Basic.Name].
+func TypeNameFor(t types.Type) *types.TypeName {
+	switch t := t.(type) {
+	case *types.Alias:
+		return t.Obj()
+	case *types.Named:
+		return t.Obj()
+	case *types.TypeParam:
+		return t.Obj()
+	case *types.Basic:
+		// See issues #71886 and #66890 for some history.
+		if tname, ok := types.Universe.Lookup(t.Name()).(*types.TypeName); ok {
+			return tname
+		}
+	}
+	return nil
+}
+
 // A NamedOrAlias is a [types.Type] that is named (as
 // defined by the spec) and capable of bearing type parameters: it
 // abstracts aliases ([types.Alias]) and defined types
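
The Basic branch works because predeclared type names such as "int" live in types.Universe, while the names of untyped constant types such as "untyped int" do not; a small demonstration:

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        // "int" resolves to a *types.TypeName in the universe scope.
        t := types.Typ[types.Int]
        tname, ok := types.Universe.Lookup(t.Name()).(*types.TypeName)
        fmt.Println(ok, tname.Name()) // true int

        // "untyped int" does not, so TypeNameFor returns nil for such types.
        u := types.Typ[types.UntypedInt]
        _, ok = types.Universe.Lookup(u.Name()).(*types.TypeName)
        fmt.Println(ok) // false
    }
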
@@ -77,7 +105,7 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
 // Every type declared by an explicit "type" declaration is a
 // NamedOrAlias. (Built-in type symbols may additionally
 // have type [types.Basic], which is not a NamedOrAlias,
-// though the spec regards them as "named".)
+// though the spec regards them as "named"; see [TypeNameFor].)
 //
 // NamedOrAlias cannot expose the Origin method, because
 // [types.Alias.Origin] and [types.Named.Origin] have different
@@ -85,32 +113,15 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
 type NamedOrAlias interface {
 	types.Type
 	Obj() *types.TypeName
-	// TODO(hxjiang): add method TypeArgs() *types.TypeList after stop supporting go1.22.
-}
-
-// TypeParams is a light shim around t.TypeParams().
-// (go/types.Alias).TypeParams requires >= 1.23.
-func TypeParams(t NamedOrAlias) *types.TypeParamList {
-	switch t := t.(type) {
-	case *types.Alias:
-		return aliases.TypeParams(t)
-	case *types.Named:
-		return t.TypeParams()
-	}
-	return nil
+	TypeArgs() *types.TypeList
+	TypeParams() *types.TypeParamList
+	SetTypeParams(tparams []*types.TypeParam)
 }
 
-// TypeArgs is a light shim around t.TypeArgs().
-// (go/types.Alias).TypeArgs requires >= 1.23.
-func TypeArgs(t NamedOrAlias) *types.TypeList {
-	switch t := t.(type) {
-	case *types.Alias:
-		return aliases.TypeArgs(t)
-	case *types.Named:
-		return t.TypeArgs()
-	}
-	return nil
-}
+var (
+	_ NamedOrAlias = (*types.Alias)(nil)
+	_ NamedOrAlias = (*types.Named)(nil)
+)
 
 // Origin returns the generic type of the Named or Alias type t if it
 // is instantiated, otherwise it returns t.
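
With the shims gone, call sites can invoke the methods directly, since the interface now requires them. A minimal sketch under that assumption (printTypeParams is a hypothetical helper, not part of this package):

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
    )

    // printTypeParams accepts anything with a TypeParams method, which both
    // *types.Named and *types.Alias provide on Go >= 1.23.
    func printTypeParams(t interface{ TypeParams() *types.TypeParamList }) {
        fmt.Println(t.TypeParams().Len()) // Len is nil-safe
    }

    func main() {
        obj := types.NewTypeName(token.NoPos, nil, "T", nil)
        named := types.NewNamed(obj, types.Typ[types.Int], nil)
        printTypeParams(named) // 0: not generic
    }
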
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index e942bc983e..743bfb81d6 100644
--- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -371,7 +371,31 @@ func ConsumeVarint(b []byte) (v uint64, n int) {
 func SizeVarint(v uint64) int {
 	// This computes 1 + (bits.Len64(v)-1)/7.
 	// 9/64 is a good enough approximation of 1/7
-	return int(9*uint32(bits.Len64(v))+64) / 64
+	//
+	// The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT
+	// instruction, which is very fast on CPUs from the last few years. The
+	// specific way of expressing the calculation matches C++ Protobuf, see
+	// https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang
+	// optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell).
+
+	// By OR'ing v with 1, we guarantee that v is never 0, without changing the
+	// result of SizeVarint. LZCNT is not defined for 0, meaning the compiler
+	// needs to add extra instructions to handle that case.
+	//
+	// The Go compiler currently (go1.24.4) does not make use of this knowledge.
+	// This opportunity (removing the XOR instruction, which handles the 0 case)
+	// results in a small (1%) performance win across CPU architectures.
+	//
+	// Independently of avoiding the 0 case, we need the v |= 1 line because
+	// it allows the Go compiler to eliminate an extra XCHGL barrier.
+	v |= 1
+
+	// It would be clearer to write log2value := 63 - uint32(...), but
+	// writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel).
+	// Proof of identity for our value range [0..63]:
+	// https://go.dev/play/p/Pdn9hEWYakX
+	log2value := uint32(bits.LeadingZeros64(v)) ^ 63
+	return int((log2value*9 + (64 + 9)) / 64)
 }
 
 // AppendFixed32 appends v to b as a little-endian uint32.
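
A quick standalone check that the new closed form matches the obvious shift loop at the varint size boundaries:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func sizeNaive(v uint64) int {
        n := 1
        for v >>= 7; v != 0; v >>= 7 {
            n++
        }
        return n
    }

    func sizeFast(v uint64) int { // the formula from SizeVarint above
        v |= 1
        log2value := uint32(bits.LeadingZeros64(v)) ^ 63
        return int((log2value*9 + (64 + 9)) / 64)
    }

    func main() {
        for _, v := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63} {
            fmt.Println(v, sizeNaive(v) == sizeFast(v)) // all true
        }
    }
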
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index 5a57ef6f3c..04696351ee 100644
Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 10132c9b38..a0aad2777f 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -69,6 +69,12 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
 				parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value
 			case genid.FeatureSet_JsonFormat_field_number:
 				parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value
+			case genid.FeatureSet_EnforceNamingStyle_field_number:
+				// EnforceNamingStyle is enforced in protoc, languages other than C++
+				// are not supposed to do anything with this feature.
+			case genid.FeatureSet_DefaultSymbolVisibility_field_number:
+				// DefaultSymbolVisibility is enforced in protoc, runtimes should not
+				// inspect this value.
 			default:
 				panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num))
 			}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
new file mode 100644
index 0000000000..a12ec9791c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
@@ -0,0 +1,33 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import "google.golang.org/protobuf/reflect/protoreflect"
+
+// UsePresenceForField reports whether the presence bitmap should be used for
+// the specified field.
+func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
+	switch {
+	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+		// Oneof fields never use the presence bitmap.
+		//
+		// Synthetic oneofs are an exception: Those are used to implement proto3
+		// optional fields and hence should follow non-oneof field semantics.
+		return false, false
+
+	case fd.IsMap():
+		// Map-typed fields never use the presence bitmap.
+		return false, false
+
+	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
+		// Lazy fields always use the presence bitmap (only messages can be lazy).
+		isLazy := fd.(interface{ IsLazy() bool }).IsLazy()
+		return isLazy, isLazy
+
+	default:
+		// If the field has presence, use the presence bitmap.
+		return fd.HasPresence(), false
+	}
+}
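
Exercising UsePresenceForField requires a real protoreflect.FieldDescriptor, so here is its decision table mirrored with plain booleans (a sketch only; the parameter names are invented for illustration):

    package main

    import "fmt"

    // usePresence mirrors the branches of filedesc.UsePresenceForField.
    func usePresence(inRealOneof, isMap, isMessage, isLazy, hasPresence bool) (use, canBeLazy bool) {
        switch {
        case inRealOneof: // members of non-synthetic oneofs never use the bitmap
            return false, false
        case isMap:
            return false, false
        case isMessage: // only message/group fields can be lazy
            return isLazy, isLazy
        default:
            return hasPresence, false
        }
    }

    func main() {
        fmt.Println(usePresence(false, false, true, true, false))  // true true: lazy message
        fmt.Println(usePresence(false, false, false, false, true)) // true false: optional scalar
        fmt.Println(usePresence(true, false, false, false, true))  // false false: real oneof member
    }
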
diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
index df8f918501..3ceb6fa7f5 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
@@ -27,6 +27,7 @@ const (
 	Api_SourceContext_field_name protoreflect.Name = "source_context"
 	Api_Mixins_field_name        protoreflect.Name = "mixins"
 	Api_Syntax_field_name        protoreflect.Name = "syntax"
+	Api_Edition_field_name       protoreflect.Name = "edition"
 
 	Api_Name_field_fullname          protoreflect.FullName = "google.protobuf.Api.name"
 	Api_Methods_field_fullname       protoreflect.FullName = "google.protobuf.Api.methods"
@@ -35,6 +36,7 @@ const (
 	Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context"
 	Api_Mixins_field_fullname        protoreflect.FullName = "google.protobuf.Api.mixins"
 	Api_Syntax_field_fullname        protoreflect.FullName = "google.protobuf.Api.syntax"
+	Api_Edition_field_fullname       protoreflect.FullName = "google.protobuf.Api.edition"
 )
 
 // Field numbers for google.protobuf.Api.
@@ -46,6 +48,7 @@ const (
 	Api_SourceContext_field_number protoreflect.FieldNumber = 5
 	Api_Mixins_field_number        protoreflect.FieldNumber = 6
 	Api_Syntax_field_number        protoreflect.FieldNumber = 7
+	Api_Edition_field_number       protoreflect.FieldNumber = 8
 )
 
 // Names for google.protobuf.Method.
@@ -63,6 +66,7 @@ const (
 	Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming"
 	Method_Options_field_name           protoreflect.Name = "options"
 	Method_Syntax_field_name            protoreflect.Name = "syntax"
+	Method_Edition_field_name           protoreflect.Name = "edition"
 
 	Method_Name_field_fullname              protoreflect.FullName = "google.protobuf.Method.name"
 	Method_RequestTypeUrl_field_fullname    protoreflect.FullName = "google.protobuf.Method.request_type_url"
@@ -71,6 +75,7 @@ const (
 	Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming"
 	Method_Options_field_fullname           protoreflect.FullName = "google.protobuf.Method.options"
 	Method_Syntax_field_fullname            protoreflect.FullName = "google.protobuf.Method.syntax"
+	Method_Edition_field_fullname           protoreflect.FullName = "google.protobuf.Method.edition"
 )
 
 // Field numbers for google.protobuf.Method.
@@ -82,6 +87,7 @@ const (
 	Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5
 	Method_Options_field_number           protoreflect.FieldNumber = 6
 	Method_Syntax_field_number            protoreflect.FieldNumber = 7
+	Method_Edition_field_number           protoreflect.FieldNumber = 8
 )
 
 // Names for google.protobuf.Mixin.
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index f30ab6b586..950a6a325a 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -34,6 +34,19 @@ const (
 	Edition_EDITION_MAX_enum_value             = 2147483647
 )
 
+// Full and short names for google.protobuf.SymbolVisibility.
+const (
+	SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility"
+	SymbolVisibility_enum_name     = "SymbolVisibility"
+)
+
+// Enum values for google.protobuf.SymbolVisibility.
+const (
+	SymbolVisibility_VISIBILITY_UNSET_enum_value  = 0
+	SymbolVisibility_VISIBILITY_LOCAL_enum_value  = 1
+	SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2
+)
+
 // Names for google.protobuf.FileDescriptorSet.
 const (
 	FileDescriptorSet_message_name     protoreflect.Name     = "FileDescriptorSet"
@@ -65,6 +78,7 @@ const (
 	FileDescriptorProto_Dependency_field_name       protoreflect.Name = "dependency"
 	FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency"
 	FileDescriptorProto_WeakDependency_field_name   protoreflect.Name = "weak_dependency"
+	FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency"
 	FileDescriptorProto_MessageType_field_name      protoreflect.Name = "message_type"
 	FileDescriptorProto_EnumType_field_name         protoreflect.Name = "enum_type"
 	FileDescriptorProto_Service_field_name          protoreflect.Name = "service"
@@ -79,6 +93,7 @@ const (
 	FileDescriptorProto_Dependency_field_fullname       protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency"
 	FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency"
 	FileDescriptorProto_WeakDependency_field_fullname   protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency"
+	FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency"
 	FileDescriptorProto_MessageType_field_fullname      protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type"
 	FileDescriptorProto_EnumType_field_fullname         protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type"
 	FileDescriptorProto_Service_field_fullname          protoreflect.FullName = "google.protobuf.FileDescriptorProto.service"
@@ -96,6 +111,7 @@ const (
 	FileDescriptorProto_Dependency_field_number       protoreflect.FieldNumber = 3
 	FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10
 	FileDescriptorProto_WeakDependency_field_number   protoreflect.FieldNumber = 11
+	FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15
 	FileDescriptorProto_MessageType_field_number      protoreflect.FieldNumber = 4
 	FileDescriptorProto_EnumType_field_number         protoreflect.FieldNumber = 5
 	FileDescriptorProto_Service_field_number          protoreflect.FieldNumber = 6
@@ -124,6 +140,7 @@ const (
 	DescriptorProto_Options_field_name        protoreflect.Name = "options"
 	DescriptorProto_ReservedRange_field_name  protoreflect.Name = "reserved_range"
 	DescriptorProto_ReservedName_field_name   protoreflect.Name = "reserved_name"
+	DescriptorProto_Visibility_field_name     protoreflect.Name = "visibility"
 
 	DescriptorProto_Name_field_fullname           protoreflect.FullName = "google.protobuf.DescriptorProto.name"
 	DescriptorProto_Field_field_fullname          protoreflect.FullName = "google.protobuf.DescriptorProto.field"
@@ -135,6 +152,7 @@ const (
 	DescriptorProto_Options_field_fullname        protoreflect.FullName = "google.protobuf.DescriptorProto.options"
 	DescriptorProto_ReservedRange_field_fullname  protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range"
 	DescriptorProto_ReservedName_field_fullname   protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name"
+	DescriptorProto_Visibility_field_fullname     protoreflect.FullName = "google.protobuf.DescriptorProto.visibility"
 )
 
 // Field numbers for google.protobuf.DescriptorProto.
@@ -149,6 +167,7 @@ const (
 	DescriptorProto_Options_field_number        protoreflect.FieldNumber = 7
 	DescriptorProto_ReservedRange_field_number  protoreflect.FieldNumber = 9
 	DescriptorProto_ReservedName_field_number   protoreflect.FieldNumber = 10
+	DescriptorProto_Visibility_field_number     protoreflect.FieldNumber = 11
 )
 
 // Names for google.protobuf.DescriptorProto.ExtensionRange.
@@ -388,12 +407,14 @@ const (
 	EnumDescriptorProto_Options_field_name       protoreflect.Name = "options"
 	EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
 	EnumDescriptorProto_ReservedName_field_name  protoreflect.Name = "reserved_name"
+	EnumDescriptorProto_Visibility_field_name    protoreflect.Name = "visibility"
 
 	EnumDescriptorProto_Name_field_fullname          protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name"
 	EnumDescriptorProto_Value_field_fullname         protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value"
 	EnumDescriptorProto_Options_field_fullname       protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options"
 	EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range"
 	EnumDescriptorProto_ReservedName_field_fullname  protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name"
+	EnumDescriptorProto_Visibility_field_fullname    protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility"
 )
 
 // Field numbers for google.protobuf.EnumDescriptorProto.
@@ -403,6 +424,7 @@ const (
 	EnumDescriptorProto_Options_field_number       protoreflect.FieldNumber = 3
 	EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4
 	EnumDescriptorProto_ReservedName_field_number  protoreflect.FieldNumber = 5
+	EnumDescriptorProto_Visibility_field_number    protoreflect.FieldNumber = 6
 )
 
 // Names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
@@ -1008,29 +1030,35 @@ const (
 
 // Field names for google.protobuf.FeatureSet.
 const (
-	FeatureSet_FieldPresence_field_name         protoreflect.Name = "field_presence"
-	FeatureSet_EnumType_field_name              protoreflect.Name = "enum_type"
-	FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
-	FeatureSet_Utf8Validation_field_name        protoreflect.Name = "utf8_validation"
-	FeatureSet_MessageEncoding_field_name       protoreflect.Name = "message_encoding"
-	FeatureSet_JsonFormat_field_name            protoreflect.Name = "json_format"
-
-	FeatureSet_FieldPresence_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
-	FeatureSet_EnumType_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
-	FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
-	FeatureSet_Utf8Validation_field_fullname        protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
-	FeatureSet_MessageEncoding_field_fullname       protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
-	FeatureSet_JsonFormat_field_fullname            protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+	FeatureSet_FieldPresence_field_name           protoreflect.Name = "field_presence"
+	FeatureSet_EnumType_field_name                protoreflect.Name = "enum_type"
+	FeatureSet_RepeatedFieldEncoding_field_name   protoreflect.Name = "repeated_field_encoding"
+	FeatureSet_Utf8Validation_field_name          protoreflect.Name = "utf8_validation"
+	FeatureSet_MessageEncoding_field_name         protoreflect.Name = "message_encoding"
+	FeatureSet_JsonFormat_field_name              protoreflect.Name = "json_format"
+	FeatureSet_EnforceNamingStyle_field_name      protoreflect.Name = "enforce_naming_style"
+	FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility"
+
+	FeatureSet_FieldPresence_field_fullname           protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
+	FeatureSet_EnumType_field_fullname                protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
+	FeatureSet_RepeatedFieldEncoding_field_fullname   protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
+	FeatureSet_Utf8Validation_field_fullname          protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
+	FeatureSet_MessageEncoding_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
+	FeatureSet_JsonFormat_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+	FeatureSet_EnforceNamingStyle_field_fullname      protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style"
+	FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility"
 )
 
 // Field numbers for google.protobuf.FeatureSet.
 const (
-	FeatureSet_FieldPresence_field_number         protoreflect.FieldNumber = 1
-	FeatureSet_EnumType_field_number              protoreflect.FieldNumber = 2
-	FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
-	FeatureSet_Utf8Validation_field_number        protoreflect.FieldNumber = 4
-	FeatureSet_MessageEncoding_field_number       protoreflect.FieldNumber = 5
-	FeatureSet_JsonFormat_field_number            protoreflect.FieldNumber = 6
+	FeatureSet_FieldPresence_field_number           protoreflect.FieldNumber = 1
+	FeatureSet_EnumType_field_number                protoreflect.FieldNumber = 2
+	FeatureSet_RepeatedFieldEncoding_field_number   protoreflect.FieldNumber = 3
+	FeatureSet_Utf8Validation_field_number          protoreflect.FieldNumber = 4
+	FeatureSet_MessageEncoding_field_number         protoreflect.FieldNumber = 5
+	FeatureSet_JsonFormat_field_number              protoreflect.FieldNumber = 6
+	FeatureSet_EnforceNamingStyle_field_number      protoreflect.FieldNumber = 7
+	FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8
 )
 
 // Full and short names for google.protobuf.FeatureSet.FieldPresence.
@@ -1112,6 +1140,40 @@ const (
 	FeatureSet_LEGACY_BEST_EFFORT_enum_value  = 2
 )
 
+// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+	FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle"
+	FeatureSet_EnforceNamingStyle_enum_name     = "EnforceNamingStyle"
+)
+
+// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+	FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0
+	FeatureSet_STYLE2024_enum_value                    = 1
+	FeatureSet_STYLE_LEGACY_enum_value                 = 2
+)
+
+// Names for google.protobuf.FeatureSet.VisibilityFeature.
+const (
+	FeatureSet_VisibilityFeature_message_name     protoreflect.Name     = "VisibilityFeature"
+	FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature"
+)
+
+// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility"
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name     = "DefaultSymbolVisibility"
+)
+
+// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+	FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0
+	FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value                        = 1
+	FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value                  = 2
+	FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value                         = 3
+	FeatureSet_VisibilityFeature_STRICT_enum_value                            = 4
+)
+
 // Names for google.protobuf.FeatureSetDefaults.
 const (
 	FeatureSetDefaults_message_name     protoreflect.Name     = "FeatureSetDefaults"
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
index 41c1f74ef8..bdad12a9bb 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -11,6 +11,7 @@ import (
 
 	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/encoding/messageset"
+	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/internal/order"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	piface "google.golang.org/protobuf/runtime/protoiface"
@@ -80,7 +81,7 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf
 		// permit us to skip over definitely-unset fields at marshal time.
 
 		var hasPresence bool
-		hasPresence, cf.isLazy = usePresenceForField(si, fd)
+		hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd)
 
 		if hasPresence {
 			cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
index dd55e8e009..5a439daacb 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -11,6 +11,7 @@ import (
 	"strings"
 	"sync/atomic"
 
+	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/reflect/protoreflect"
 )
 
@@ -53,7 +54,7 @@ func opaqueInitHook(mi *MessageInfo) bool {
 		fd := fds.Get(i)
 		fs := si.fieldsByNumber[fd.Number()]
 		var fi fieldInfo
-		usePresence, _ := usePresenceForField(si, fd)
+		usePresence, _ := filedesc.UsePresenceForField(fd)
 
 		switch {
 		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
@@ -343,17 +344,15 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
 			if p.IsNil() {
 				return false
 			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
 				return false
 			}
-			rv := sp.AsValueOf(fs.Type.Elem())
 			return rv.Elem().Len() > 0
 		},
 		clear: func(p pointer) {
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if !sp.IsNil() {
-				rv := sp.AsValueOf(fs.Type.Elem())
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if !rv.IsNil() {
 				rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
 			}
 		},
@@ -361,11 +360,10 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
 			if p.IsNil() {
 				return conv.Zero()
 			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
 				return conv.Zero()
 			}
-			rv := sp.AsValueOf(fs.Type.Elem())
 			if rv.Elem().Len() == 0 {
 				return conv.Zero()
 			}
@@ -598,30 +596,3 @@ func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
 func (mi *MessageInfo) present(p pointer, index uint32) bool {
 	return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
 }
-
-// usePresenceForField implements the somewhat intricate logic of when
-// the presence bitmap is used for a field.  The main logic is that a
-// field that is optional or that can be lazy will use the presence
-// bit, but for proto2, also maps have a presence bit. It also records
-// if the field can ever be lazy, which is true if we have a
-// lazyOffset and the field is a message or a slice of messages. A
-// field that is lazy will always need a presence bit.  Oneofs are not
-// lazy and do not use presence, unless they are a synthetic oneof,
-// which is a proto3 optional field. For proto3 optionals, we use the
-// presence and they can also be lazy when applicable (a message).
-func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
-	hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
-
-	// Non-oneof scalar fields with explicit field presence use the presence array.
-	usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
-	switch {
-	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
-		return false, false
-	case fd.IsMap():
-		return false, false
-	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
-		return hasLazyField, hasLazyField
-	default:
-		return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
-	}
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go
index 914cb1deda..443afe81cd 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/presence.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go
@@ -32,9 +32,6 @@ func (p presence) toElem(num uint32) (ret *uint32) {
 
 // Present checks for the presence of a specific field number in a presence set.
 func (p presence) Present(num uint32) bool {
-	if p.P == nil {
-		return false
-	}
 	return Export{}.Present(p.toElem(num), num)
 }
 
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
similarity index 99%
rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
index 1ffddf6877..42dd6f70c6 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build go1.21
-
 package strs
 
 import (
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
deleted file mode 100644
index 832a7988f1..0000000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.21
-
-package strs
-
-import (
-	"unsafe"
-
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-type (
-	stringHeader struct {
-		Data unsafe.Pointer
-		Len  int
-	}
-	sliceHeader struct {
-		Data unsafe.Pointer
-		Len  int
-		Cap  int
-	}
-)
-
-// UnsafeString returns an unsafe string reference of b.
-// The caller must treat the input slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user
-// unless the input slice is provably immutable.
-func UnsafeString(b []byte) (s string) {
-	src := (*sliceHeader)(unsafe.Pointer(&b))
-	dst := (*stringHeader)(unsafe.Pointer(&s))
-	dst.Data = src.Data
-	dst.Len = src.Len
-	return s
-}
-
-// UnsafeBytes returns an unsafe bytes slice reference of s.
-// The caller must treat returned slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user.
-func UnsafeBytes(s string) (b []byte) {
-	src := (*stringHeader)(unsafe.Pointer(&s))
-	dst := (*sliceHeader)(unsafe.Pointer(&b))
-	dst.Data = src.Data
-	dst.Len = src.Len
-	dst.Cap = src.Len
-	return b
-}
-
-// Builder builds a set of strings with shared lifetime.
-// This differs from strings.Builder, which is for building a single string.
-type Builder struct {
-	buf []byte
-}
-
-// AppendFullName is equivalent to protoreflect.FullName.Append,
-// but optimized for large batches where each name has a shared lifetime.
-func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
-	n := len(prefix) + len(".") + len(name)
-	if len(prefix) == 0 {
-		n -= len(".")
-	}
-	sb.grow(n)
-	sb.buf = append(sb.buf, prefix...)
-	sb.buf = append(sb.buf, '.')
-	sb.buf = append(sb.buf, name...)
-	return protoreflect.FullName(sb.last(n))
-}
-
-// MakeString is equivalent to string(b), but optimized for large batches
-// with a shared lifetime.
-func (sb *Builder) MakeString(b []byte) string {
-	sb.grow(len(b))
-	sb.buf = append(sb.buf, b...)
-	return sb.last(len(b))
-}
-
-func (sb *Builder) grow(n int) {
-	if cap(sb.buf)-len(sb.buf) >= n {
-		return
-	}
-
-	// Unlike strings.Builder, we do not need to copy over the contents
-	// of the old buffer since our builder provides no API for
-	// retrieving previously created strings.
-	sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
-}
-
-func (sb *Builder) last(n int) string {
-	return UnsafeString(sb.buf[len(sb.buf)-n:])
-}
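
The retained strings_unsafe.go is not shown in this hunk, but with go1.21 as the floor, the header structs above can presumably be replaced by the unsafe.String/unsafe.Slice helpers added in Go 1.20; a minimal sketch of the equivalent conversions:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // unsafeString and unsafeBytes show the modern equivalents of the deleted
    // header-based conversions. The same immutability caveats apply.
    func unsafeString(b []byte) string {
        return unsafe.String(unsafe.SliceData(b), len(b))
    }

    func unsafeBytes(s string) []byte {
        return unsafe.Slice(unsafe.StringData(s), len(s))
    }

    func main() {
        b := []byte("hello")
        s := unsafeString(b) // b must not be mutated after this point
        fmt.Println(s, len(unsafeBytes(s))) // hello 5
    }
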
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 01efc33030..697d1c14f3 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
 const (
 	Major      = 1
 	Minor      = 36
-	Patch      = 5
+	Patch      = 8
 	PreRelease = ""
 )
 
diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
index 3c6fe57807..ef55b97dde 100644
--- a/vendor/google.golang.org/protobuf/proto/merge.go
+++ b/vendor/google.golang.org/protobuf/proto/merge.go
@@ -59,6 +59,12 @@ func Clone(m Message) Message {
 	return dst.Interface()
 }
 
+// CloneOf returns a deep copy of m. If the top-level message is invalid,
+// it returns an invalid message as well.
+func CloneOf[M Message](m M) M {
+	return Clone(m).(M)
+}
+
 // mergeOptions provides a namespace for merge functions, and can be
 // exported in the future if we add user-visible merge options.
 type mergeOptions struct{}
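
CloneOf saves the type assertion that Clone otherwise forces on callers; for example, with a well-known type (assuming protobuf-go >= v1.36, which this update vendors):

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        d := durationpb.New(3 * time.Second)
        d2 := proto.CloneOf(d) // static type *durationpb.Duration, no assertion needed
        fmt.Println(proto.Equal(d, d2), d == d2) // true false: equal value, distinct copy
    }
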
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index ea154eec44..730331e666 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -21,6 +21,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
 		b = p.appendRepeatedField(b, "public_dependency", nil)
 	case 11:
 		b = p.appendRepeatedField(b, "weak_dependency", nil)
+	case 15:
+		b = p.appendRepeatedField(b, "option_dependency", nil)
 	case 4:
 		b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto)
 	case 5:
@@ -66,6 +68,8 @@ func (p *SourcePath) appendDescriptorProto(b []byte) []byte {
 		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange)
 	case 10:
 		b = p.appendRepeatedField(b, "reserved_name", nil)
+	case 11:
+		b = p.appendSingularField(b, "visibility", nil)
 	}
 	return b
 }
@@ -85,6 +89,8 @@ func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte {
 		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange)
 	case 5:
 		b = p.appendRepeatedField(b, "reserved_name", nil)
+	case 6:
+		b = p.appendSingularField(b, "visibility", nil)
 	}
 	return b
 }
@@ -398,6 +404,10 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte {
 		b = p.appendSingularField(b, "message_encoding", nil)
 	case 6:
 		b = p.appendSingularField(b, "json_format", nil)
+	case 7:
+		b = p.appendSingularField(b, "enforce_naming_style", nil)
+	case 8:
+		b = p.appendSingularField(b, "default_symbol_visibility", nil)
 	}
 	return b
 }
diff --git a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
similarity index 99%
rename from hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
index 479527b58d..fe17f37220 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build go1.21
-
 package protoreflect
 
 import (
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
deleted file mode 100644
index 0015fcb35d..0000000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.21
-
-package protoreflect
-
-import (
-	"unsafe"
-
-	"google.golang.org/protobuf/internal/pragma"
-)
-
-type (
-	stringHeader struct {
-		Data unsafe.Pointer
-		Len  int
-	}
-	sliceHeader struct {
-		Data unsafe.Pointer
-		Len  int
-		Cap  int
-	}
-	ifaceHeader struct {
-		Type unsafe.Pointer
-		Data unsafe.Pointer
-	}
-)
-
-var (
-	nilType     = typeOf(nil)
-	boolType    = typeOf(*new(bool))
-	int32Type   = typeOf(*new(int32))
-	int64Type   = typeOf(*new(int64))
-	uint32Type  = typeOf(*new(uint32))
-	uint64Type  = typeOf(*new(uint64))
-	float32Type = typeOf(*new(float32))
-	float64Type = typeOf(*new(float64))
-	stringType  = typeOf(*new(string))
-	bytesType   = typeOf(*new([]byte))
-	enumType    = typeOf(*new(EnumNumber))
-)
-
-// typeOf returns a pointer to the Go type information.
-// The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t any) unsafe.Pointer {
-	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
-}
-
-// value is a union where only one type can be represented at a time.
-// The struct is 24B large on 64-bit systems and requires the minimum storage
-// necessary to represent each possible type.
-//
-// The Go GC needs to be able to scan variables containing pointers.
-// As such, pointers and non-pointers cannot be intermixed.
-type value struct {
-	pragma.DoNotCompare // 0B
-
-	// typ stores the type of the value as a pointer to the Go type.
-	typ unsafe.Pointer // 8B
-
-	// ptr stores the data pointer for a String, Bytes, or interface value.
-	ptr unsafe.Pointer // 8B
-
-	// num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
-	// Enum value as a raw uint64.
-	//
-	// It is also used to store the length of a String or Bytes value;
-	// the capacity is ignored.
-	num uint64 // 8B
-}
-
-func valueOfString(v string) Value {
-	p := (*stringHeader)(unsafe.Pointer(&v))
-	return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfBytes(v []byte) Value {
-	p := (*sliceHeader)(unsafe.Pointer(&v))
-	return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfIface(v any) Value {
-	p := (*ifaceHeader)(unsafe.Pointer(&v))
-	return Value{typ: p.Type, ptr: p.Data}
-}
-
-func (v Value) getString() (x string) {
-	*(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)}
-	return x
-}
-func (v Value) getBytes() (x []byte) {
-	*(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
-	return x
-}
-func (v Value) getIface() (x any) {
-	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
-	return x
-}
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index a516337674..4eacb523c3 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -151,6 +151,70 @@ func (Edition) EnumDescriptor() ([]byte, []int) {
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
 }
 
+// Describes the 'visibility' of a symbol with respect to the proto import
+// system. Symbols can only be imported when the visibility rules do not prevent
+// it (ex: local symbols cannot be imported). Visibility modifiers can only be set
+// on `message` and `enum`, as they are the only types available to be referenced
+// from other files.
+type SymbolVisibility int32
+
+const (
+	SymbolVisibility_VISIBILITY_UNSET  SymbolVisibility = 0
+	SymbolVisibility_VISIBILITY_LOCAL  SymbolVisibility = 1
+	SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2
+)
+
+// Enum value maps for SymbolVisibility.
+var (
+	SymbolVisibility_name = map[int32]string{
+		0: "VISIBILITY_UNSET",
+		1: "VISIBILITY_LOCAL",
+		2: "VISIBILITY_EXPORT",
+	}
+	SymbolVisibility_value = map[string]int32{
+		"VISIBILITY_UNSET":  0,
+		"VISIBILITY_LOCAL":  1,
+		"VISIBILITY_EXPORT": 2,
+	}
+)
+
+func (x SymbolVisibility) Enum() *SymbolVisibility {
+	p := new(SymbolVisibility)
+	*p = x
+	return p
+}
+
+func (x SymbolVisibility) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+}
+
+func (SymbolVisibility) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[1]
+}
+
+func (x SymbolVisibility) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *SymbolVisibility) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = SymbolVisibility(num)
+	return nil
+}
+
+// Deprecated: Use SymbolVisibility.Descriptor instead.
+func (SymbolVisibility) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1}
+}
+
 // The verification state of the extension range.
 type ExtensionRangeOptions_VerificationState int32
 
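
The new enum follows the usual generated-code pattern, so once this vendored copy is in place it can be used like any other descriptorpb enum; for example:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        v := descriptorpb.SymbolVisibility_VISIBILITY_LOCAL
        fmt.Println(v, v.Number()) // VISIBILITY_LOCAL 1
        p := v.Enum()              // pointer form, for optional proto2 fields
        fmt.Println(*p == v)       // true
    }
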
@@ -183,11 +247,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string {
 }
 
 func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
 }
 
 func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[1]
+	return &file_google_protobuf_descriptor_proto_enumTypes[2]
 }
 
 func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
@@ -299,11 +363,11 @@ func (x FieldDescriptorProto_Type) String() string {
 }
 
 func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
 }
 
 func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[2]
+	return &file_google_protobuf_descriptor_proto_enumTypes[3]
 }
 
 func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
@@ -362,11 +426,11 @@ func (x FieldDescriptorProto_Label) String() string {
 }
 
 func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
 }
 
 func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[3]
+	return &file_google_protobuf_descriptor_proto_enumTypes[4]
 }
 
 func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
@@ -423,11 +487,11 @@ func (x FileOptions_OptimizeMode) String() string {
 }
 
 func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
 }
 
 func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[4]
+	return &file_google_protobuf_descriptor_proto_enumTypes[5]
 }
 
 func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
@@ -489,11 +553,11 @@ func (x FieldOptions_CType) String() string {
 }
 
 func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
 }
 
 func (FieldOptions_CType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[5]
+	return &file_google_protobuf_descriptor_proto_enumTypes[6]
 }
 
 func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
@@ -551,11 +615,11 @@ func (x FieldOptions_JSType) String() string {
 }
 
 func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
 }
 
 func (FieldOptions_JSType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[6]
+	return &file_google_protobuf_descriptor_proto_enumTypes[7]
 }
 
 func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
@@ -611,11 +675,11 @@ func (x FieldOptions_OptionRetention) String() string {
 }
 
 func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
 }
 
 func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[7]
+	return &file_google_protobuf_descriptor_proto_enumTypes[8]
 }
 
 func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
@@ -694,11 +758,11 @@ func (x FieldOptions_OptionTargetType) String() string {
 }
 
 func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
 }
 
 func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[8]
+	return &file_google_protobuf_descriptor_proto_enumTypes[9]
 }
 
 func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
@@ -756,11 +820,11 @@ func (x MethodOptions_IdempotencyLevel) String() string {
 }
 
 func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
 }
 
 func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[9]
+	return &file_google_protobuf_descriptor_proto_enumTypes[10]
 }
 
 func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
@@ -818,11 +882,11 @@ func (x FeatureSet_FieldPresence) String() string {
 }
 
 func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
 }
 
 func (FeatureSet_FieldPresence) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[10]
+	return &file_google_protobuf_descriptor_proto_enumTypes[11]
 }
 
 func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber {
@@ -877,11 +941,11 @@ func (x FeatureSet_EnumType) String() string {
 }
 
 func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
 }
 
 func (FeatureSet_EnumType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[11]
+	return &file_google_protobuf_descriptor_proto_enumTypes[12]
 }
 
 func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber {
@@ -936,11 +1000,11 @@ func (x FeatureSet_RepeatedFieldEncoding) String() string {
 }
 
 func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
 }
 
 func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[12]
+	return &file_google_protobuf_descriptor_proto_enumTypes[13]
 }
 
 func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber {
@@ -995,11 +1059,11 @@ func (x FeatureSet_Utf8Validation) String() string {
 }
 
 func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
 }
 
 func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[13]
+	return &file_google_protobuf_descriptor_proto_enumTypes[14]
 }
 
 func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber {
@@ -1054,11 +1118,11 @@ func (x FeatureSet_MessageEncoding) String() string {
 }
 
 func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
 }
 
 func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[14]
+	return &file_google_protobuf_descriptor_proto_enumTypes[15]
 }
 
 func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber {
@@ -1113,11 +1177,11 @@ func (x FeatureSet_JsonFormat) String() string {
 }
 
 func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
 }
 
 func (FeatureSet_JsonFormat) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[15]
+	return &file_google_protobuf_descriptor_proto_enumTypes[16]
 }
 
 func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber {
@@ -1139,6 +1203,136 @@ func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) {
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5}
 }
 
+type FeatureSet_EnforceNamingStyle int32
+
+const (
+	FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0
+	FeatureSet_STYLE2024                    FeatureSet_EnforceNamingStyle = 1
+	FeatureSet_STYLE_LEGACY                 FeatureSet_EnforceNamingStyle = 2
+)
+
+// Enum value maps for FeatureSet_EnforceNamingStyle.
+var (
+	FeatureSet_EnforceNamingStyle_name = map[int32]string{
+		0: "ENFORCE_NAMING_STYLE_UNKNOWN",
+		1: "STYLE2024",
+		2: "STYLE_LEGACY",
+	}
+	FeatureSet_EnforceNamingStyle_value = map[string]int32{
+		"ENFORCE_NAMING_STYLE_UNKNOWN": 0,
+		"STYLE2024":                    1,
+		"STYLE_LEGACY":                 2,
+	}
+)
+
+func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle {
+	p := new(FeatureSet_EnforceNamingStyle)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_EnforceNamingStyle) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor()
+}
+
+func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[17]
+}
+
+func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_EnforceNamingStyle(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead.
+func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6}
+}
+
+type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32
+
+const (
+	FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0
+	// Default pre-EDITION_2024: all symbols with UNSET visibility are exported.
+	FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1
+	// All top-level symbols default to export, nested default to local.
+	FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2
+	// All symbols default to local.
+	FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3
+	// All symbols are local by default. Nested types cannot be exported,
+	// with a special-case caveat for message { enum {} reserved 1 to max; }.
+	// This is the recommended setting for new protos.
+	FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4
+)
+
+// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility.
+var (
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{
+		0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN",
+		1: "EXPORT_ALL",
+		2: "EXPORT_TOP_LEVEL",
+		3: "LOCAL_ALL",
+		4: "STRICT",
+	}
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{
+		"DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0,
+		"EXPORT_ALL":                        1,
+		"EXPORT_TOP_LEVEL":                  2,
+		"LOCAL_ALL":                         3,
+		"STRICT":                            4,
+	}
+)
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+	p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor()
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[18]
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead.
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0}
+}
+
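The generated API for the new enum follows the same conventions as every other protobuf-go enum. A minimal editorial sketch of how it is consumed (not part of the generated file), assuming the usual `google.golang.org/protobuf/types/descriptorpb` import path:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	v := descriptorpb.FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL
	fmt.Println(v.String()) // EXPORT_TOP_LEVEL
	fmt.Println(v.Number()) // 2
	// Enum() returns a pointer to a copy, as needed when populating
	// optional proto2/editions fields.
	_ = v.Enum()
}
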
 // Represents the identified object's effect on the element in the original
 // .proto file.
 type GeneratedCodeInfo_Annotation_Semantic int32
@@ -1177,11 +1371,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string {
 }
 
 func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor()
 }
 
 func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[16]
+	return &file_google_protobuf_descriptor_proto_enumTypes[19]
 }
 
 func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
@@ -1262,6 +1456,9 @@ type FileDescriptorProto struct {
 	// Indexes of the weak imported files in the dependency list.
 	// For Google-internal migration only. Do not use.
 	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+	// Names of files imported by this file purely for the purpose of providing
+	// option extensions. These are excluded from the dependency list above.
+	OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"`
 	// All top-level definitions in this file.
 	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
 	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
@@ -1277,8 +1474,14 @@ type FileDescriptorProto struct {
 	// The supported values are "proto2", "proto3", and "editions".
 	//
 	// If `edition` is present, this value must be "editions".
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
 	// The edition of the proto file.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Edition       *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache     protoimpl.SizeCache
@@ -1349,6 +1552,13 @@ func (x *FileDescriptorProto) GetWeakDependency() []int32 {
 	return nil
 }
 
+func (x *FileDescriptorProto) GetOptionDependency() []string {
+	if x != nil {
+		return x.OptionDependency
+	}
+	return nil
+}
+
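The new option_dependency field gets the standard nil-safe getter. A short usage sketch (editorial, not part of the generated file; the file names are hypothetical):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fd := &descriptorpb.FileDescriptorProto{
		Name:             proto.String("example.proto"),
		OptionDependency: []string{"my/custom_options.proto"}, // hypothetical option-only import
	}
	// Safe even on a nil receiver, like all generated accessors.
	fmt.Println(fd.GetOptionDependency()) // [my/custom_options.proto]
}
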
 func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto {
 	if x != nil {
 		return x.MessageType
@@ -1419,7 +1629,9 @@ type DescriptorProto struct {
 	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved field names, which may not be used by fields in the same message.
 	// A given name may only be reserved once.
-	ReservedName  []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	// Support for `export` and `local` keywords on messages.
+	Visibility    *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache     protoimpl.SizeCache
 }
@@ -1524,6 +1736,13 @@ func (x *DescriptorProto) GetReservedName() []string {
 	return nil
 }
 
+func (x *DescriptorProto) GetVisibility() SymbolVisibility {
+	if x != nil && x.Visibility != nil {
+		return *x.Visibility
+	}
+	return SymbolVisibility_VISIBILITY_UNSET
+}
+
 type ExtensionRangeOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The parser stores options it doesn't recognize here. See above.
@@ -1836,7 +2055,9 @@ type EnumDescriptorProto struct {
 	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved enum value names, which may not be reused. A given name may only
 	// be reserved once.
-	ReservedName  []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	// Support for `export` and `local` keywords on enums.
+	Visibility    *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache     protoimpl.SizeCache
 }
@@ -1906,6 +2127,13 @@ func (x *EnumDescriptorProto) GetReservedName() []string {
 	return nil
 }
 
+func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility {
+	if x != nil && x.Visibility != nil {
+		return *x.Visibility
+	}
+	return SymbolVisibility_VISIBILITY_UNSET
+}
+
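Both new Visibility fields are optional enums, so their getters fall back to SymbolVisibility_VISIBILITY_UNSET when the field is absent. A sketch, assuming the SymbolVisibility values defined elsewhere in this file (VISIBILITY_UNSET, VISIBILITY_LOCAL, VISIBILITY_EXPORT):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	ed := &descriptorpb.EnumDescriptorProto{}
	fmt.Println(ed.GetVisibility()) // VISIBILITY_UNSET

	local := descriptorpb.SymbolVisibility_VISIBILITY_LOCAL
	ed.Visibility = &local
	fmt.Println(ed.GetVisibility()) // VISIBILITY_LOCAL
}
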
 // Describes a value within an enum.
 type EnumValueDescriptorProto struct {
 	state         protoimpl.MessageState `protogen:"open.v1"`
@@ -2212,6 +2440,9 @@ type FileOptions struct {
 	// determining the ruby package.
 	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here.
 	// See the documentation for the "Options" section above.
@@ -2482,6 +2713,9 @@ type MessageOptions struct {
 	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2639,7 +2873,10 @@ type FieldOptions struct {
 	// for accessors, or it will be completely ignored; in the very least, this
 	// is a formalization for deprecating fields.
 	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// DEPRECATED. DO NOT USE!
 	// For Google-internal migration only. Do not use.
+	//
+	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
 	// Indicate that the field value should not be printed out when using debug
 	// formats, e.g. when the field contains sensitive credentials.
@@ -2648,6 +2885,9 @@ type FieldOptions struct {
 	Targets         []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
 	EditionDefaults []*FieldOptions_EditionDefault  `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features       *FeatureSet                  `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
 	FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
@@ -2740,6 +2980,7 @@ func (x *FieldOptions) GetDeprecated() bool {
 	return Default_FieldOptions_Deprecated
 }
 
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 func (x *FieldOptions) GetWeak() bool {
 	if x != nil && x.Weak != nil {
 		return *x.Weak
@@ -2799,6 +3040,9 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
 type OneofOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2871,6 +3115,9 @@ type EnumOptions struct {
 	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2958,6 +3205,9 @@ type EnumValueOptions struct {
 	// this is a formalization for deprecating enum values.
 	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
 	// Indicate that fields annotated with this enum value should not be printed
 	// out when using debug formats, e.g. when the field contains sensitive
@@ -3046,6 +3296,9 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
 type ServiceOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
 	// Is this service deprecated?
 	// Depending on the target platform, this can emit Deprecated annotations
@@ -3124,6 +3377,9 @@ type MethodOptions struct {
 	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -3303,16 +3559,18 @@ func (x *UninterpretedOption) GetAggregateValue() string {
 // be designed and implemented to handle this, hopefully before we ever hit a
 // conflict here.
 type FeatureSet struct {
-	state                 protoimpl.MessageState            `protogen:"open.v1"`
-	FieldPresence         *FeatureSet_FieldPresence         `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
-	EnumType              *FeatureSet_EnumType              `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
-	RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
-	Utf8Validation        *FeatureSet_Utf8Validation        `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
-	MessageEncoding       *FeatureSet_MessageEncoding       `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
-	JsonFormat            *FeatureSet_JsonFormat            `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
-	extensionFields       protoimpl.ExtensionFields
-	unknownFields         protoimpl.UnknownFields
-	sizeCache             protoimpl.SizeCache
+	state                   protoimpl.MessageState                                `protogen:"open.v1"`
+	FieldPresence           *FeatureSet_FieldPresence                             `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
+	EnumType                *FeatureSet_EnumType                                  `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
+	RepeatedFieldEncoding   *FeatureSet_RepeatedFieldEncoding                     `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
+	Utf8Validation          *FeatureSet_Utf8Validation                            `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
+	MessageEncoding         *FeatureSet_MessageEncoding                           `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
+	JsonFormat              *FeatureSet_JsonFormat                                `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+	EnforceNamingStyle      *FeatureSet_EnforceNamingStyle                        `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
+	DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"`
+	extensionFields         protoimpl.ExtensionFields
+	unknownFields           protoimpl.UnknownFields
+	sizeCache               protoimpl.SizeCache
 }
 
 func (x *FeatureSet) Reset() {
@@ -3387,6 +3645,20 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
 	return FeatureSet_JSON_FORMAT_UNKNOWN
 }
 
+func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle {
+	if x != nil && x.EnforceNamingStyle != nil {
+		return *x.EnforceNamingStyle
+	}
+	return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN
+}
+
+func (x *FeatureSet) GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+	if x != nil && x.DefaultSymbolVisibility != nil {
+		return *x.DefaultSymbolVisibility
+	}
+	return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN
+}
+
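The two new FeatureSet getters mirror the existing ones: an unset field returns the *_UNKNOWN zero value instead of dereferencing a nil pointer. A minimal editorial sketch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fs := &descriptorpb.FeatureSet{}
	fmt.Println(fs.GetEnforceNamingStyle())      // ENFORCE_NAMING_STYLE_UNKNOWN
	fmt.Println(fs.GetDefaultSymbolVisibility()) // DEFAULT_SYMBOL_VISIBILITY_UNKNOWN

	style := descriptorpb.FeatureSet_STYLE2024
	fs.EnforceNamingStyle = &style
	fmt.Println(fs.GetEnforceNamingStyle()) // STYLE2024
}
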
 // A compiled specification for the defaults of a set of features.  These
 // messages are generated from FeatureSet extensions and can be used to seed
 // feature resolution. The resolution with this object becomes a simple search
@@ -4047,6 +4319,42 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
 	return false
 }
 
+type FeatureSet_VisibilityFeature struct {
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *FeatureSet_VisibilityFeature) Reset() {
+	*x = FeatureSet_VisibilityFeature{}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *FeatureSet_VisibilityFeature) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSet_VisibilityFeature) ProtoMessage() {}
+
+func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead.
+func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+}
+
 // A map from every known edition with a unique set of defaults to its
 // defaults. Not all editions may be contained here.  For a given edition,
 // the defaults at the closest matching edition ordered at or before it should
@@ -4064,7 +4372,7 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct {
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
 	*x = FeatureSetDefaults_FeatureSetEditionDefault{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4076,7 +4384,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
 func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4212,7 +4520,7 @@ type SourceCodeInfo_Location struct {
 
 func (x *SourceCodeInfo_Location) Reset() {
 	*x = SourceCodeInfo_Location{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4224,7 +4532,7 @@ func (x *SourceCodeInfo_Location) String() string {
 func (*SourceCodeInfo_Location) ProtoMessage() {}
 
 func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4296,7 +4604,7 @@ type GeneratedCodeInfo_Annotation struct {
 
 func (x *GeneratedCodeInfo_Annotation) Reset() {
 	*x = GeneratedCodeInfo_Annotation{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4308,7 +4616,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string {
 func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4361,777 +4669,389 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio
 
 var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{
-	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
-	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
-	0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01,
-	0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07,
-	0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70,
-	0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64,
-	0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65,
-	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
-	0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28,
-	0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65,
-	0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65,
-	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65,
-	0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c,
-	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70,
-	0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d,
-	0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
-	0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44,
-	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07,
-	0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e,
-	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63,
-	0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52,
-	0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
-	0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69,
-	0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
-	0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f,
-	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
-	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64,
-	0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
-	0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65,
-	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f,
-	0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65,
-	0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d,
-	0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e,
-	0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
-	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
-	0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
-	0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64,
-	0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
-	0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
-	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
-	0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d,
-	0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a,
-	0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a,
-	0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61,
-	0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52,
-	0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
-	0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37,
-	0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
-	0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
-	0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65,
-	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64,
-	0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67,
-	0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61,
-	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
-	0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
-	0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
-	0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65,
-	0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a,
-	0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02,
-	0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94,
-	0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16,
-	0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06,
-	0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e,
-	0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e,
-	0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
-	0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
-	0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18,
-	0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a,
-	0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45,
-	0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55,
-	0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07,
-	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20,
-	0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c,
-	0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e,
-	0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
-	0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b,
-	0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75,
-	0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
-	0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
-	0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28,
-	0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a,
-	0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
-	0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a,
-	0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f,
-	0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
-	0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49,
-	0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
-	0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
-	0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
-	0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a,
-	0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a,
-	0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d,
-	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a,
-	0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f,
-	0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36,
-	0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54,
-	0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e,
-	0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12,
-	0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c,
-	0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45,
-	0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f,
-	0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e,
-	0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
-	0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
-	0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
-	0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
-	0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
-	0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52,
-	0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61,
-	0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f,
-	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65,
-	0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d,
-	0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
-	0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74,
-	0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
-	0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
-	0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b,
-	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16,
-	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
-	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65,
-	0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74,
-	0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
-	0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
-	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79,
-	0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
-	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54,
-	0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a,
-	0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
-	0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f,
-	0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12,
-	0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
-	0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
-	0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
-	0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67,
-	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63,
-	0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74,
-	0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01,
-	0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61,
-	0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d,
-	0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20,
-	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61,
-	0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a,
-	0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65,
-	0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14,
-	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65,
-	0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48,
-	0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69,
-	0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20,
-	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61,
-	0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12,
-	0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18,
-	0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65,
-	0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a,
-	0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
-	0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b,
-	0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69,
-	0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72,
-	0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61,
-	0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
-	0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
-	0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72,
-	0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65,
-	0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01,
-	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e,
-	0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a,
-	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
-	0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
-	0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74,
-	0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65,
-	0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73,
-	0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
-	0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
-	0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
-	0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72,
-	0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77,
-	0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a,
-	0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69,
-	0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73,
-	0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e,
-	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
-	0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16,
-	0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d,
-	0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68,
-	0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
-	0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
-	0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61,
-	0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58,
-	0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69,
-	0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45,
-	0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45,
-	0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49,
-	0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a,
-	0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70,
-	0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
-	0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f,
-	0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65,
-	0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d,
-	0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72,
-	0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63,
-	0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
-	0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65,
-	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72,
-	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65,
-	0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45,
-	0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
-	0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
-	0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b,
-	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
-	0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69,
-	0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08,
-	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
-	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
-	0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a,
-	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05,
-	0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08,
-	0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79,
-	0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53,
-	0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06,
-	0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61,
-	0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06,
-	0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
-	0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a,
-	0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
-	0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65,
-	0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28,
-	0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69,
-	0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
-	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
-	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
-	0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
-	0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65,
-	0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65,
-	0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
-	0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74,
-	0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
-	0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03,
-	0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79,
-	0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65,
-	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18,
-	0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a,
-	0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
-	0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
-	0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
-	0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
-	0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
-	0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a,
-	0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
-	0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69,
-	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a,
-	0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75,
-	0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72,
-	0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f,
-	0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
-	0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77,
-	0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65,
-	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67,
-	0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f,
-	0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f,
-	0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06,
-	0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44,
-	0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45,
-	0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d,
-	0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a,
-	0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
-	0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15,
-	0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
-	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49,
-	0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10,
-	0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45,
-	0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72,
-	0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
-	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
-	0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
-	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f,
-	0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
-	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03,
-	0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45,
-	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14,
-	0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e,
-	0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07,
-	0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52,
-	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10,
-	0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04,
-	0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65,
-	0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
-	0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28,
-	0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
-	0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
-	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8,
-	0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
-	0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c,
-	0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
-	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
-	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
-	0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65,
-	0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
-	0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42,
-	0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c,
-	0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f,
-	0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
-	0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
-	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
-	0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10,
-	0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74,
-	0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64,
-	0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20,
-	0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
-	0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
-	0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07,
-	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69,
-	0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
-	0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
-	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
-	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
-	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99,
-	0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70,
-	0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65,
-	0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
-	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f,
-	0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
-	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a,
-	0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65,
-	0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
-	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f,
-	0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12,
-	0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a,
-	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55,
-	0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52,
-	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
-	0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65,
-	0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74,
-	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f,
-	0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c,
-	0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61,
-	0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c,
-	0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01,
-	0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
-	0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
-	0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c,
-	0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f,
-	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67,
-	0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e,
-	0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f,
-	0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65,
-	0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e,
-	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78,
-	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64,
-	0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
-	0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98,
-	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
-	0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43,
-	0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
-	0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65,
-	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e,
-	0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54,
-	0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01,
-	0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12,
-	0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08,
-	0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70,
-	0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f,
-	0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64,
-	0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88,
-	0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
-	0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43,
-	0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65,
-	0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64,
-	0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69,
-	0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61,
-	0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04,
-	0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2,
-	0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03,
-	0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65,
-	0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
-	0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98,
-	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48,
-	0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08,
-	0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64,
-	0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72,
-	0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61,
-	0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2,
-	0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f,
-	0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c,
-	0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73,
-	0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c,
-	0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45,
-	0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
-	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49,
-	0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
-	0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55,
-	0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79,
-	0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45,
-	0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22,
-	0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45,
-	0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
-	0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a,
-	0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50,
-	0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56,
-	0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46,
-	0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b,
-	0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59,
-	0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01,
-	0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
-	0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
-	0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
-	0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52,
-	0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49,
-	0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46,
-	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f,
-	0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09,
-	0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47,
-	0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10,
-	0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90,
-	0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8,
-	0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
-	0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f,
-	0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
-	0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
-	0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75,
-	0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
-	0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
-	0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
-	0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65,
-	0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c,
-	0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78,
-	0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d,
-	0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08,
-	0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f,
-	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
-	0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a,
-	0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74,
-	0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74,
-	0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42,
-	0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61,
-	0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d,
-	0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
-	0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74,
-	0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74,
-	0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06,
-	0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74,
-	0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08,
-	0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11,
-	0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
-	0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
-	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
-	0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-	0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
-	0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10,
-	0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63,
-	0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f,
-	0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69,
-	0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10,
-	0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64,
-	0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
-	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
-	0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61,
-	0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63,
-	0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45,
-	0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7,
-	0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44,
-	0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
-	0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43,
-	0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49,
-	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11,
-	0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8,
-	0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32,
-	0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a,
-	0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f,
-	0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f,
-	0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
-	0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
-	0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59,
-	0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
-	0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d,
-	0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42,
-	0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
-	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
-	0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
-	0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65,
-	0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
-})
+const file_google_protobuf_descriptor_proto_rawDesc = "" +
+	"\n" +
+	" google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" +
+	"\x11FileDescriptorSet\x128\n" +
+	"\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" +
+	"\x13FileDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
+	"\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" +
+	"\n" +
+	"dependency\x18\x03 \x03(\tR\n" +
+	"dependency\x12+\n" +
+	"\x11public_dependency\x18\n" +
+	" \x03(\x05R\x10publicDependency\x12'\n" +
+	"\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" +
+	"\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" +
+	"\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" +
+	"\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" +
+	"\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" +
+	"\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" +
+	"\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" +
+	"\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" +
+	"\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" +
+	"\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" +
+	"\x0fDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12;\n" +
+	"\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" +
+	"\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" +
+	"\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" +
+	"nestedType\x12A\n" +
+	"\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" +
+	"\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" +
+	"\n" +
+	"oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" +
+	"\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" +
+	"\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" +
+	"\rreserved_name\x18\n" +
+	" \x03(\tR\freservedName\x12A\n" +
+	"\n" +
+	"visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+	"visibility\x1az\n" +
+	"\x0eExtensionRange\x12\x14\n" +
+	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+	"\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" +
+	"\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" +
+	"\rReservedRange\x12\x14\n" +
+	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+	"\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" +
+	"\x15ExtensionRangeOptions\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" +
+	"\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" +
+	"\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" +
+	"\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" +
+	"UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" +
+	"\vDeclaration\x12\x16\n" +
+	"\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" +
+	"\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" +
+	"\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" +
+	"\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" +
+	"\brepeated\x18\x06 \x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" +
+	"\x11VerificationState\x12\x0f\n" +
+	"\vDECLARATION\x10\x00\x12\x0e\n" +
+	"\n" +
+	"UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" +
+	"\x14FieldDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+	"\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" +
+	"\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" +
+	"\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" +
+	"\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" +
+	"\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" +
+	"\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" +
+	"\voneof_index\x18\t \x01(\x05R\n" +
+	"oneofIndex\x12\x1b\n" +
+	"\tjson_name\x18\n" +
+	" \x01(\tR\bjsonName\x127\n" +
+	"\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" +
+	"\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" +
+	"\x04Type\x12\x0f\n" +
+	"\vTYPE_DOUBLE\x10\x01\x12\x0e\n" +
+	"\n" +
+	"TYPE_FLOAT\x10\x02\x12\x0e\n" +
+	"\n" +
+	"TYPE_INT64\x10\x03\x12\x0f\n" +
+	"\vTYPE_UINT64\x10\x04\x12\x0e\n" +
+	"\n" +
+	"TYPE_INT32\x10\x05\x12\x10\n" +
+	"\fTYPE_FIXED64\x10\x06\x12\x10\n" +
+	"\fTYPE_FIXED32\x10\a\x12\r\n" +
+	"\tTYPE_BOOL\x10\b\x12\x0f\n" +
+	"\vTYPE_STRING\x10\t\x12\x0e\n" +
+	"\n" +
+	"TYPE_GROUP\x10\n" +
+	"\x12\x10\n" +
+	"\fTYPE_MESSAGE\x10\v\x12\x0e\n" +
+	"\n" +
+	"TYPE_BYTES\x10\f\x12\x0f\n" +
+	"\vTYPE_UINT32\x10\r\x12\r\n" +
+	"\tTYPE_ENUM\x10\x0e\x12\x11\n" +
+	"\rTYPE_SFIXED32\x10\x0f\x12\x11\n" +
+	"\rTYPE_SFIXED64\x10\x10\x12\x0f\n" +
+	"\vTYPE_SINT32\x10\x11\x12\x0f\n" +
+	"\vTYPE_SINT64\x10\x12\"C\n" +
+	"\x05Label\x12\x12\n" +
+	"\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" +
+	"\x0eLABEL_REPEATED\x10\x03\x12\x12\n" +
+	"\x0eLABEL_REQUIRED\x10\x02\"c\n" +
+	"\x14OneofDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x127\n" +
+	"\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" +
+	"\x13EnumDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12?\n" +
+	"\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" +
+	"\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" +
+	"\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" +
+	"\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" +
+	"\n" +
+	"visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+	"visibility\x1a;\n" +
+	"\x11EnumReservedRange\x12\x14\n" +
+	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+	"\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" +
+	"\x18EnumValueDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+	"\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" +
+	"\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" +
+	"\x16ServiceDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12>\n" +
+	"\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" +
+	"\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" +
+	"\x15MethodDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" +
+	"\n" +
+	"input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" +
+	"\voutput_type\x18\x03 \x01(\tR\n" +
+	"outputType\x128\n" +
+	"\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" +
+	"\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" +
+	"\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" +
+	"\vFileOptions\x12!\n" +
+	"\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" +
+	"\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" +
+	"\x13java_multiple_files\x18\n" +
+	" \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" +
+	"\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" +
+	"\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" +
+	"\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" +
+	"\n" +
+	"go_package\x18\v \x01(\tR\tgoPackage\x125\n" +
+	"\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" +
+	"\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" +
+	"\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" +
+	"\n" +
+	"deprecated\x18\x17 \x01(\b:\x05falseR\n" +
+	"deprecated\x12.\n" +
+	"\x10cc_enable_arenas\x18\x1f \x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" +
+	"\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" +
+	"\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" +
+	"\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" +
+	"\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" +
+	"\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" +
+	"\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" +
+	"\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" +
+	"\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" +
+	"\fOptimizeMode\x12\t\n" +
+	"\x05SPEED\x10\x01\x12\r\n" +
+	"\tCODE_SIZE\x10\x02\x12\x10\n" +
+	"\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" +
+	"\x0eMessageOptions\x12<\n" +
+	"\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" +
+	"\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" +
+	"\n" +
+	"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+	"deprecated\x12\x1b\n" +
+	"\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" +
+	"&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
+	"\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" +
+	"\"\xa1\r\n" +
+	"\fFieldOptions\x12A\n" +
+	"\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" +
+	"\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" +
+	"\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" +
+	"\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" +
+	"\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" +
+	"\n" +
+	"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+	"deprecated\x12\x1d\n" +
+	"\x04weak\x18\n" +
+	" \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" +
+	"\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" +
+	"\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" +
+	"\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" +
+	"\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" +
+	"\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" +
+	"\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" +
+	"\x0eEditionDefault\x122\n" +
+	"\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" +
+	"\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" +
+	"\x0eFeatureSupport\x12G\n" +
+	"\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" +
+	"\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" +
+	"\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" +
+	"\x0fedition_removed\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" +
+	"\x05CType\x12\n" +
+	"\n" +
+	"\x06STRING\x10\x00\x12\b\n" +
+	"\x04CORD\x10\x01\x12\x10\n" +
+	"\fSTRING_PIECE\x10\x02\"5\n" +
+	"\x06JSType\x12\r\n" +
+	"\tJS_NORMAL\x10\x00\x12\r\n" +
+	"\tJS_STRING\x10\x01\x12\r\n" +
+	"\tJS_NUMBER\x10\x02\"U\n" +
+	"\x0fOptionRetention\x12\x15\n" +
+	"\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" +
+	"\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" +
+	"\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" +
+	"\x10OptionTargetType\x12\x17\n" +
+	"\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" +
+	"\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" +
+	"\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" +
+	"\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" +
+	"\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" +
+	"\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" +
+	"\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" +
+	"\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" +
+	"\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" +
+	"\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" +
+	"\fOneofOptions\x127\n" +
+	"\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" +
+	"\vEnumOptions\x12\x1f\n" +
+	"\vallow_alias\x18\x02 \x01(\bR\n" +
+	"allowAlias\x12%\n" +
+	"\n" +
+	"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+	"deprecated\x12V\n" +
+	"&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
+	"\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" +
+	"\x10EnumValueOptions\x12%\n" +
+	"\n" +
+	"deprecated\x18\x01 \x01(\b:\x05falseR\n" +
+	"deprecated\x127\n" +
+	"\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" +
+	"\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" +
+	"\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" +
+	"\x0eServiceOptions\x127\n" +
+	"\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" +
+	"\n" +
+	"deprecated\x18! \x01(\b:\x05falseR\n" +
+	"deprecated\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" +
+	"\rMethodOptions\x12%\n" +
+	"\n" +
+	"deprecated\x18! \x01(\b:\x05falseR\n" +
+	"deprecated\x12q\n" +
+	"\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" +
+	"\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" +
+	"\x10IdempotencyLevel\x12\x17\n" +
+	"\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" +
+	"\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" +
+	"\n" +
+	"IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" +
+	"\x13UninterpretedOption\x12A\n" +
+	"\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" +
+	"\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" +
+	"\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" +
+	"\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" +
+	"\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" +
+	"\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" +
+	"\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" +
+	"\bNamePart\x12\x1b\n" +
+	"\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" +
+	"\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" +
+	"\n" +
+	"FeatureSet\x12\x91\x01\n" +
+	"\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" +
+	"\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" +
+	"\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" +
+	"\x0futf8_validation\x18\x04 \x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" +
+	"\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" +
+	"\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" +
+	"\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" +
+	"jsonFormat\x12\xab\x01\n" +
+	"\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" +
+	"\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" +
+	"EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" +
+	"\x11VisibilityFeature\"\x81\x01\n" +
+	"\x17DefaultSymbolVisibility\x12%\n" +
+	"!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" +
+	"\n" +
+	"EXPORT_ALL\x10\x01\x12\x14\n" +
+	"\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" +
+	"\tLOCAL_ALL\x10\x03\x12\n" +
+	"\n" +
+	"\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" +
+	"\rFieldPresence\x12\x1a\n" +
+	"\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" +
+	"\bEXPLICIT\x10\x01\x12\f\n" +
+	"\bIMPLICIT\x10\x02\x12\x13\n" +
+	"\x0fLEGACY_REQUIRED\x10\x03\"7\n" +
+	"\bEnumType\x12\x15\n" +
+	"\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" +
+	"\x04OPEN\x10\x01\x12\n" +
+	"\n" +
+	"\x06CLOSED\x10\x02\"V\n" +
+	"\x15RepeatedFieldEncoding\x12#\n" +
+	"\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" +
+	"\n" +
+	"\x06PACKED\x10\x01\x12\f\n" +
+	"\bEXPANDED\x10\x02\"I\n" +
+	"\x0eUtf8Validation\x12\x1b\n" +
+	"\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" +
+	"\n" +
+	"\x06VERIFY\x10\x02\x12\b\n" +
+	"\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" +
+	"\x0fMessageEncoding\x12\x1c\n" +
+	"\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" +
+	"\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" +
+	"\tDELIMITED\x10\x02\"H\n" +
+	"\n" +
+	"JsonFormat\x12\x17\n" +
+	"\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" +
+	"\x05ALLOW\x10\x01\x12\x16\n" +
+	"\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" +
+	"\x12EnforceNamingStyle\x12 \n" +
+	"\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" +
+	"\tSTYLE2024\x10\x01\x12\x10\n" +
+	"\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" +
+	"\x12FeatureSetDefaults\x12X\n" +
+	"\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" +
+	"\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" +
+	"\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" +
+	"\x18FeatureSetEditionDefault\x122\n" +
+	"\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" +
+	"\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" +
+	"\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" +
+	"\x0eSourceCodeInfo\x12D\n" +
+	"\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" +
+	"\bLocation\x12\x16\n" +
+	"\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" +
+	"\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" +
+	"\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" +
+	"\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" +
+	"\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" +
+	"\x11GeneratedCodeInfo\x12M\n" +
+	"\n" +
+	"annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" +
+	"annotation\x1a\xeb\x01\n" +
+	"\n" +
+	"Annotation\x12\x16\n" +
+	"\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" +
+	"\vsource_file\x18\x02 \x01(\tR\n" +
+	"sourceFile\x12\x14\n" +
+	"\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" +
+	"\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" +
+	"\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" +
+	"\bSemantic\x12\b\n" +
+	"\x04NONE\x10\x00\x12\a\n" +
+	"\x03SET\x10\x01\x12\t\n" +
+	"\x05ALIAS\x10\x02*\xa7\x02\n" +
+	"\aEdition\x12\x13\n" +
+	"\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" +
+	"\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" +
+	"\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" +
+	"\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" +
+	"\fEDITION_2023\x10\xe8\a\x12\x11\n" +
+	"\fEDITION_2024\x10\xe9\a\x12\x17\n" +
+	"\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" +
+	"\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" +
+	"\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" +
+	"\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" +
+	"\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" +
+	"\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" +
+	"\x10SymbolVisibility\x12\x14\n" +
+	"\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" +
+	"\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" +
+	"\x11VISIBILITY_EXPORT\x10\x02B~\n" +
+	"\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection"
 
 var (
 	file_google_protobuf_descriptor_proto_rawDescOnce sync.Once
@@ -5145,143 +5065,151 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
 	return file_google_protobuf_descriptor_proto_rawDescData
 }
 
-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
-var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33)
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20)
+var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34)
 var file_google_protobuf_descriptor_proto_goTypes = []any{
-	(Edition)(0), // 0: google.protobuf.Edition
-	(ExtensionRangeOptions_VerificationState)(0),        // 1: google.protobuf.ExtensionRangeOptions.VerificationState
-	(FieldDescriptorProto_Type)(0),                      // 2: google.protobuf.FieldDescriptorProto.Type
-	(FieldDescriptorProto_Label)(0),                     // 3: google.protobuf.FieldDescriptorProto.Label
-	(FileOptions_OptimizeMode)(0),                       // 4: google.protobuf.FileOptions.OptimizeMode
-	(FieldOptions_CType)(0),                             // 5: google.protobuf.FieldOptions.CType
-	(FieldOptions_JSType)(0),                            // 6: google.protobuf.FieldOptions.JSType
-	(FieldOptions_OptionRetention)(0),                   // 7: google.protobuf.FieldOptions.OptionRetention
-	(FieldOptions_OptionTargetType)(0),                  // 8: google.protobuf.FieldOptions.OptionTargetType
-	(MethodOptions_IdempotencyLevel)(0),                 // 9: google.protobuf.MethodOptions.IdempotencyLevel
-	(FeatureSet_FieldPresence)(0),                       // 10: google.protobuf.FeatureSet.FieldPresence
-	(FeatureSet_EnumType)(0),                            // 11: google.protobuf.FeatureSet.EnumType
-	(FeatureSet_RepeatedFieldEncoding)(0),               // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding
-	(FeatureSet_Utf8Validation)(0),                      // 13: google.protobuf.FeatureSet.Utf8Validation
-	(FeatureSet_MessageEncoding)(0),                     // 14: google.protobuf.FeatureSet.MessageEncoding
-	(FeatureSet_JsonFormat)(0),                          // 15: google.protobuf.FeatureSet.JsonFormat
-	(GeneratedCodeInfo_Annotation_Semantic)(0),          // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
-	(*FileDescriptorSet)(nil),                           // 17: google.protobuf.FileDescriptorSet
-	(*FileDescriptorProto)(nil),                         // 18: google.protobuf.FileDescriptorProto
-	(*DescriptorProto)(nil),                             // 19: google.protobuf.DescriptorProto
-	(*ExtensionRangeOptions)(nil),                       // 20: google.protobuf.ExtensionRangeOptions
-	(*FieldDescriptorProto)(nil),                        // 21: google.protobuf.FieldDescriptorProto
-	(*OneofDescriptorProto)(nil),                        // 22: google.protobuf.OneofDescriptorProto
-	(*EnumDescriptorProto)(nil),                         // 23: google.protobuf.EnumDescriptorProto
-	(*EnumValueDescriptorProto)(nil),                    // 24: google.protobuf.EnumValueDescriptorProto
-	(*ServiceDescriptorProto)(nil),                      // 25: google.protobuf.ServiceDescriptorProto
-	(*MethodDescriptorProto)(nil),                       // 26: google.protobuf.MethodDescriptorProto
-	(*FileOptions)(nil),                                 // 27: google.protobuf.FileOptions
-	(*MessageOptions)(nil),                              // 28: google.protobuf.MessageOptions
-	(*FieldOptions)(nil),                                // 29: google.protobuf.FieldOptions
-	(*OneofOptions)(nil),                                // 30: google.protobuf.OneofOptions
-	(*EnumOptions)(nil),                                 // 31: google.protobuf.EnumOptions
-	(*EnumValueOptions)(nil),                            // 32: google.protobuf.EnumValueOptions
-	(*ServiceOptions)(nil),                              // 33: google.protobuf.ServiceOptions
-	(*MethodOptions)(nil),                               // 34: google.protobuf.MethodOptions
-	(*UninterpretedOption)(nil),                         // 35: google.protobuf.UninterpretedOption
-	(*FeatureSet)(nil),                                  // 36: google.protobuf.FeatureSet
-	(*FeatureSetDefaults)(nil),                          // 37: google.protobuf.FeatureSetDefaults
-	(*SourceCodeInfo)(nil),                              // 38: google.protobuf.SourceCodeInfo
-	(*GeneratedCodeInfo)(nil),                           // 39: google.protobuf.GeneratedCodeInfo
-	(*DescriptorProto_ExtensionRange)(nil),              // 40: google.protobuf.DescriptorProto.ExtensionRange
-	(*DescriptorProto_ReservedRange)(nil),               // 41: google.protobuf.DescriptorProto.ReservedRange
-	(*ExtensionRangeOptions_Declaration)(nil),           // 42: google.protobuf.ExtensionRangeOptions.Declaration
-	(*EnumDescriptorProto_EnumReservedRange)(nil),       // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
-	(*FieldOptions_EditionDefault)(nil),                 // 44: google.protobuf.FieldOptions.EditionDefault
-	(*FieldOptions_FeatureSupport)(nil),                 // 45: google.protobuf.FieldOptions.FeatureSupport
-	(*UninterpretedOption_NamePart)(nil),                // 46: google.protobuf.UninterpretedOption.NamePart
-	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
-	(*SourceCodeInfo_Location)(nil),                     // 48: google.protobuf.SourceCodeInfo.Location
-	(*GeneratedCodeInfo_Annotation)(nil),                // 49: google.protobuf.GeneratedCodeInfo.Annotation
+	(Edition)(0),          // 0: google.protobuf.Edition
+	(SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility
+	(ExtensionRangeOptions_VerificationState)(0),              // 2: google.protobuf.ExtensionRangeOptions.VerificationState
+	(FieldDescriptorProto_Type)(0),                            // 3: google.protobuf.FieldDescriptorProto.Type
+	(FieldDescriptorProto_Label)(0),                           // 4: google.protobuf.FieldDescriptorProto.Label
+	(FileOptions_OptimizeMode)(0),                             // 5: google.protobuf.FileOptions.OptimizeMode
+	(FieldOptions_CType)(0),                                   // 6: google.protobuf.FieldOptions.CType
+	(FieldOptions_JSType)(0),                                  // 7: google.protobuf.FieldOptions.JSType
+	(FieldOptions_OptionRetention)(0),                         // 8: google.protobuf.FieldOptions.OptionRetention
+	(FieldOptions_OptionTargetType)(0),                        // 9: google.protobuf.FieldOptions.OptionTargetType
+	(MethodOptions_IdempotencyLevel)(0),                       // 10: google.protobuf.MethodOptions.IdempotencyLevel
+	(FeatureSet_FieldPresence)(0),                             // 11: google.protobuf.FeatureSet.FieldPresence
+	(FeatureSet_EnumType)(0),                                  // 12: google.protobuf.FeatureSet.EnumType
+	(FeatureSet_RepeatedFieldEncoding)(0),                     // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding
+	(FeatureSet_Utf8Validation)(0),                            // 14: google.protobuf.FeatureSet.Utf8Validation
+	(FeatureSet_MessageEncoding)(0),                           // 15: google.protobuf.FeatureSet.MessageEncoding
+	(FeatureSet_JsonFormat)(0),                                // 16: google.protobuf.FeatureSet.JsonFormat
+	(FeatureSet_EnforceNamingStyle)(0),                        // 17: google.protobuf.FeatureSet.EnforceNamingStyle
+	(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+	(GeneratedCodeInfo_Annotation_Semantic)(0),                // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+	(*FileDescriptorSet)(nil),                                 // 20: google.protobuf.FileDescriptorSet
+	(*FileDescriptorProto)(nil),                               // 21: google.protobuf.FileDescriptorProto
+	(*DescriptorProto)(nil),                                   // 22: google.protobuf.DescriptorProto
+	(*ExtensionRangeOptions)(nil),                             // 23: google.protobuf.ExtensionRangeOptions
+	(*FieldDescriptorProto)(nil),                              // 24: google.protobuf.FieldDescriptorProto
+	(*OneofDescriptorProto)(nil),                              // 25: google.protobuf.OneofDescriptorProto
+	(*EnumDescriptorProto)(nil),                               // 26: google.protobuf.EnumDescriptorProto
+	(*EnumValueDescriptorProto)(nil),                          // 27: google.protobuf.EnumValueDescriptorProto
+	(*ServiceDescriptorProto)(nil),                            // 28: google.protobuf.ServiceDescriptorProto
+	(*MethodDescriptorProto)(nil),                             // 29: google.protobuf.MethodDescriptorProto
+	(*FileOptions)(nil),                                       // 30: google.protobuf.FileOptions
+	(*MessageOptions)(nil),                                    // 31: google.protobuf.MessageOptions
+	(*FieldOptions)(nil),                                      // 32: google.protobuf.FieldOptions
+	(*OneofOptions)(nil),                                      // 33: google.protobuf.OneofOptions
+	(*EnumOptions)(nil),                                       // 34: google.protobuf.EnumOptions
+	(*EnumValueOptions)(nil),                                  // 35: google.protobuf.EnumValueOptions
+	(*ServiceOptions)(nil),                                    // 36: google.protobuf.ServiceOptions
+	(*MethodOptions)(nil),                                     // 37: google.protobuf.MethodOptions
+	(*UninterpretedOption)(nil),                               // 38: google.protobuf.UninterpretedOption
+	(*FeatureSet)(nil),                                        // 39: google.protobuf.FeatureSet
+	(*FeatureSetDefaults)(nil),                                // 40: google.protobuf.FeatureSetDefaults
+	(*SourceCodeInfo)(nil),                                    // 41: google.protobuf.SourceCodeInfo
+	(*GeneratedCodeInfo)(nil),                                 // 42: google.protobuf.GeneratedCodeInfo
+	(*DescriptorProto_ExtensionRange)(nil),                    // 43: google.protobuf.DescriptorProto.ExtensionRange
+	(*DescriptorProto_ReservedRange)(nil),                     // 44: google.protobuf.DescriptorProto.ReservedRange
+	(*ExtensionRangeOptions_Declaration)(nil),                 // 45: google.protobuf.ExtensionRangeOptions.Declaration
+	(*EnumDescriptorProto_EnumReservedRange)(nil),             // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange
+	(*FieldOptions_EditionDefault)(nil),                       // 47: google.protobuf.FieldOptions.EditionDefault
+	(*FieldOptions_FeatureSupport)(nil),                       // 48: google.protobuf.FieldOptions.FeatureSupport
+	(*UninterpretedOption_NamePart)(nil),                      // 49: google.protobuf.UninterpretedOption.NamePart
+	(*FeatureSet_VisibilityFeature)(nil),                      // 50: google.protobuf.FeatureSet.VisibilityFeature
+	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil),       // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+	(*SourceCodeInfo_Location)(nil),                           // 52: google.protobuf.SourceCodeInfo.Location
+	(*GeneratedCodeInfo_Annotation)(nil),                      // 53: google.protobuf.GeneratedCodeInfo.Annotation
 }
 var file_google_protobuf_descriptor_proto_depIdxs = []int32{
-	18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
-	19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
-	23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
-	25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
-	21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
-	27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
-	38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+	21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+	22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+	26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+	28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+	24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+	30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+	41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
 	0,  // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
-	21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
-	21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
-	19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
-	23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
-	40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
-	22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
-	28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
-	41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
-	35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
-	36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
-	1,  // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
-	3,  // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
-	2,  // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
-	29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
-	30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
-	24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
-	31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
-	43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
-	32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
-	26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
-	33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
-	34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
-	4,  // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
-	36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	5,  // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
-	6,  // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
-	7,  // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
-	8,  // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
-	44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
-	36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
-	45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
-	35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
-	45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
-	35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	9,  // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
-	36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
-	10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
-	11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
-	12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
-	13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
-	14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
-	15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
-	47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
-	0,  // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
-	0,  // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
-	48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
-	49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
-	20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
-	0,  // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
-	0,  // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
-	0,  // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
-	0,  // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
-	0,  // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
-	36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
-	36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
-	16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
-	77, // [77:77] is the sub-list for method output_type
-	77, // [77:77] is the sub-list for method input_type
-	77, // [77:77] is the sub-list for extension type_name
-	77, // [77:77] is the sub-list for extension extendee
-	0,  // [0:77] is the sub-list for field type_name
+	24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+	24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+	22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+	26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+	43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+	25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+	31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+	44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+	1,  // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+	38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
+	39, // 19: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
+	2,  // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
+	4,  // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
+	3,  // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
+	32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+	33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+	27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+	34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+	46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+	1,  // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+	35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+	29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+	36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+	37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+	5,  // 33: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
+	39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	6,  // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
+	7,  // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
+	8,  // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
+	9,  // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
+	47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
+	39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
+	48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+	38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+	48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+	38, // 52: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+	39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+	11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
+	12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
+	13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
+	14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
+	15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
+	16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
+	17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle
+	18, // 66: google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+	51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+	0,  // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+	0,  // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+	52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+	53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+	23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+	0,  // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+	0,  // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
+	0,  // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
+	0,  // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
+	0,  // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+	39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
+	39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
+	19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+	81, // [81:81] is the sub-list for method output_type
+	81, // [81:81] is the sub-list for method input_type
+	81, // [81:81] is the sub-list for extension type_name
+	81, // [81:81] is the sub-list for extension extendee
+	0,  // [0:81] is the sub-list for field type_name
 }
 
 func init() { file_google_protobuf_descriptor_proto_init() }
@@ -5294,8 +5222,8 @@ func file_google_protobuf_descriptor_proto_init() {
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)),
-			NumEnums:      17,
-			NumMessages:   33,
+			NumEnums:      20,
+			NumMessages:   34,
 			NumExtensions: 0,
 			NumServices:   0,
 		},
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index 28d24bad79..37e712b6b7 100644
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -228,63 +228,29 @@ var (
 
 var File_google_protobuf_go_features_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_go_features_proto_rawDesc = string([]byte{
-	0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67,
-	0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73,
-	0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01,
-	0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72,
-	0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18,
-	0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65,
-	0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
-	0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c,
-	0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61,
-	0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
-	0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
-	0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69,
-	0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70,
-	0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49,
-	0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01,
-	0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55,
-	0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f,
-	0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2,
-	0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
-	0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72,
-	0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e,
-	0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70,
-	0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98,
-	0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52,
-	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
-	0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74,
-	0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a,
-	0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49,
-	0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
-	0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e,
-	0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44,
-	0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45,
-	0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d,
-	0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f,
-	0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50,
-	0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52,
-	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
-	0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45,
-	0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52,
-	0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54,
-	0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f,
-	0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
-})
+const file_google_protobuf_go_features_proto_rawDesc = "" +
+	"\n" +
+	"!google/protobuf/go_features.proto\x12\x02pb\x1a google/protobuf/descriptor.proto\"\xab\x05\n" +
+	"\n" +
+	"GoFeatures\x12\xbe\x01\n" +
+	"\x1alegacy_unmarshal_json_enum\x18\x01 \x01(\bB\x80\x01\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\t\x12\x04true\x18\x84\a\xa2\x01\n" +
+	"\x12\x05false\x18\xe7\a\xb2\x01[\b\xe8\a\x10\xe8\a\x1aSThe legacy UnmarshalJSON API is deprecated and will be removed in a future edition.R\x17legacyUnmarshalJsonEnum\x12t\n" +
+	"\tapi_level\x18\x02 \x01(\x0e2\x17.pb.GoFeatures.APILevelB>\x88\x01\x01\x98\x01\x03\x98\x01\x01\xa2\x01\x1a\x12\x15API_LEVEL_UNSPECIFIED\x18\x84\a\xa2\x01\x0f\x12\n" +
+	"API_OPAQUE\x18\xe9\a\xb2\x01\x03\b\xe8\aR\bapiLevel\x12|\n" +
+	"\x11strip_enum_prefix\x18\x03 \x01(\x0e2\x1e.pb.GoFeatures.StripEnumPrefixB0\x88\x01\x01\x98\x01\x06\x98\x01\a\x98\x01\x01\xa2\x01\x1b\x12\x16STRIP_ENUM_PREFIX_KEEP\x18\x84\a\xb2\x01\x03\b\xe9\aR\x0fstripEnumPrefix\"S\n" +
+	"\bAPILevel\x12\x19\n" +
+	"\x15API_LEVEL_UNSPECIFIED\x10\x00\x12\f\n" +
+	"\bAPI_OPEN\x10\x01\x12\x0e\n" +
+	"\n" +
+	"API_HYBRID\x10\x02\x12\x0e\n" +
+	"\n" +
+	"API_OPAQUE\x10\x03\"\x92\x01\n" +
+	"\x0fStripEnumPrefix\x12!\n" +
+	"\x1dSTRIP_ENUM_PREFIX_UNSPECIFIED\x10\x00\x12\x1a\n" +
+	"\x16STRIP_ENUM_PREFIX_KEEP\x10\x01\x12#\n" +
+	"\x1fSTRIP_ENUM_PREFIX_GENERATE_BOTH\x10\x02\x12\x1b\n" +
+	"\x17STRIP_ENUM_PREFIX_STRIP\x10\x03:<\n" +
+	"\x02go\x12\x1b.google.protobuf.FeatureSet\x18\xea\a \x01(\v2\x0e.pb.GoFeaturesR\x02goB/Z-google.golang.org/protobuf/types/gofeaturespb"
 
 var (
 	file_google_protobuf_go_features_proto_rawDescOnce sync.Once
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 497da66e91..1ff0d1494d 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -412,23 +412,13 @@ func (x *Any) GetValue() []byte {
 
 var File_google_protobuf_any_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_any_proto_rawDesc = string([]byte{
-	0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
-	0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
-	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f,
-	0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
-	0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_any_proto_rawDesc = "" +
+	"\n" +
+	"\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" +
+	"\x03Any\x12\x19\n" +
+	"\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" +
+	"\x05value\x18\x02 \x01(\fR\x05valueBv\n" +
+	"\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_any_proto_rawDescOnce sync.Once
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index 193880d181..ca2e7b38f4 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -289,24 +289,13 @@ func (x *Duration) GetNanos() int32 {
 
 var File_google_protobuf_duration_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_duration_proto_rawDesc = string([]byte{
-	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
-	0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
-	0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01,
-	0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
-	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64,
-	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47,
-	0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
-	0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_duration_proto_rawDesc = "" +
+	"\n" +
+	"\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n" +
+	"\bDuration\x12\x18\n" +
+	"\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
+	"\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n" +
+	"\x13com.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_duration_proto_rawDescOnce sync.Once
diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
index a5b8657c4b..1d7ee3b476 100644
--- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
@@ -86,20 +86,12 @@ func (*Empty) Descriptor() ([]byte, []int) {
 
 var File_google_protobuf_empty_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_empty_proto_rawDesc = string([]byte{
-	0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07,
-	0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a,
-	0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b,
-	0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2,
-	0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77,
-	0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_empty_proto_rawDesc = "" +
+	"\n" +
+	"\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\a\n" +
+	"\x05EmptyB}\n" +
+	"\x13com.google.protobufB\n" +
+	"EmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_empty_proto_rawDescOnce sync.Once
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
index 041feb0f3e..91ee89a5cd 100644
--- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -504,23 +504,12 @@ func (x *FieldMask) GetPaths() []string {
 
 var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_field_mask_proto_rawDesc = string([]byte{
-	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
-	0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
-	0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e,
-	0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
-	0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
-	0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70,
-	0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61,
-	0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
-	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_field_mask_proto_rawDesc = "" +
+	"\n" +
+	" google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"!\n" +
+	"\tFieldMask\x12\x14\n" +
+	"\x05paths\x18\x01 \x03(\tR\x05pathsB\x85\x01\n" +
+	"\x13com.google.protobufB\x0eFieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
index ecdd31ab53..30411b7283 100644
--- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -672,55 +672,31 @@ func (x *ListValue) GetValues() []*Value {
 
 var File_google_protobuf_struct_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_struct_proto_rawDesc = string([]byte{
-	0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
-	0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69,
-	0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
-	0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
-	0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c,
-	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75,
-	0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65,
-	0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
-	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b,
-	0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62,
-	0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48,
-	0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c,
-	0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
-	0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73,
-	0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69,
-	0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69,
-	0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22,
-	0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06,
-	0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09,
-	0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c,
-	0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
-	0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
-	0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
-	0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62,
-	0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
-	0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x33,
-})
+const file_google_protobuf_struct_proto_rawDesc = "" +
+	"\n" +
+	"\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n" +
+	"\x06Struct\x12;\n" +
+	"\x06fields\x18\x01 \x03(\v2#.google.protobuf.Struct.FieldsEntryR\x06fields\x1aQ\n" +
+	"\vFieldsEntry\x12\x10\n" +
+	"\x03key\x18\x01 \x01(\tR\x03key\x12,\n" +
+	"\x05value\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x05value:\x028\x01\"\xb2\x02\n" +
+	"\x05Value\x12;\n" +
+	"\n" +
+	"null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n" +
+	"\fnumber_value\x18\x02 \x01(\x01H\x00R\vnumberValue\x12#\n" +
+	"\fstring_value\x18\x03 \x01(\tH\x00R\vstringValue\x12\x1f\n" +
+	"\n" +
+	"bool_value\x18\x04 \x01(\bH\x00R\tboolValue\x12<\n" +
+	"\fstruct_value\x18\x05 \x01(\v2\x17.google.protobuf.StructH\x00R\vstructValue\x12;\n" +
+	"\n" +
+	"list_value\x18\x06 \x01(\v2\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n" +
+	"\x04kind\";\n" +
+	"\tListValue\x12.\n" +
+	"\x06values\x18\x01 \x03(\v2\x16.google.protobuf.ValueR\x06values*\x1b\n" +
+	"\tNullValue\x12\x0e\n" +
+	"\n" +
+	"NULL_VALUE\x10\x00B\x7f\n" +
+	"\x13com.google.protobufB\vStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_struct_proto_rawDescOnce sync.Once
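The rawDesc rewrites in this and the following generated files are mechanical: newer protoc-gen-go output (the google.golang.org/protobuf bump recorded in modules.txt below) emits the file descriptor as a string constant instead of a `[]byte` literal, trading the hex dump for escaped-string syntax and avoiding a slice allocation at package init. A minimal sketch confirming the two forms encode identical bytes, using the first five bytes of the struct.proto descriptor from the hunk above:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// First five bytes of the old []byte form of the descriptor.
	oldForm := []byte{0x0a, 0x1c, 0x67, 0x6f, 0x6f}
	// The same prefix as emitted in the new string-constant form.
	newForm := "\n\x1cgoo"
	fmt.Println(bytes.Equal(oldForm, []byte(newForm))) // true
}
```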
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 00ac835c0b..06d584c14b 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -298,24 +298,13 @@ func (x *Timestamp) GetNanos() int32 {
 
 var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{
-	0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
-	0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
-	0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
-	0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
-	0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
-	0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
-	0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
-	0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
-	0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_timestamp_proto_rawDesc = "" +
+	"\n" +
+	"\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" +
+	"\tTimestamp\x12\x18\n" +
+	"\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
+	"\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" +
+	"\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
index 5de5301063..b7c2d0607d 100644
--- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -28,10 +28,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
-// Wrappers for primitive (non-message) types. These types are useful
-// for embedding primitives in the `google.protobuf.Any` type and for places
-// where we need to distinguish between the absence of a primitive
-// typed field and its default value.
+// Wrappers for primitive (non-message) types. These types were needed
+// for legacy reasons and are not recommended for use in new APIs.
+//
+// Historically, these wrappers were useful for getting presence on proto3
+// primitive fields, but proto3 syntax has since been updated to support the
+// `optional` keyword. Using that keyword is now the strongly preferred way to
+// add presence to proto3 primitive fields.
+//
+// A secondary use case was to embed primitives in the `google.protobuf.Any`
+// type: it is now recommended that you embed your value in your own wrapper
+// message, which can be specifically documented.
 //
 // These wrappers have no meaningful use within repeated fields as they lack
 // the ability to detect presence on individual elements.
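The presence argument in the comment above is easiest to see in code. A short sketch (the retry-count field is hypothetical) of how a wrapper value distinguishes "unset" from the zero value, which is exactly what proto3's `optional` keyword now provides without the extra message type:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Legacy presence via a wrapper: nil means unset, while a wrapper
	// holding 0 means explicitly set to zero.
	var maxRetries *wrapperspb.Int32Value
	fmt.Println(maxRetries == nil) // true: unset
	maxRetries = wrapperspb.Int32(0)
	fmt.Println(maxRetries.GetValue()) // 0: set, distinguishable from unset
	// A modern proto3 field `optional int32 max_retries = 1;` generates a
	// *int32 in Go with the same unset/zero distinction and no wrapper.
}
```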
@@ -54,6 +61,9 @@ import (
 // Wrapper message for `double`.
 //
 // The JSON representation for `DoubleValue` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs, and
+// there are no plans to remove it.
 type DoubleValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The double value.
@@ -107,6 +117,9 @@ func (x *DoubleValue) GetValue() float64 {
 // Wrapper message for `float`.
 //
 // The JSON representation for `FloatValue` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs, and
+// there are no plans to remove it.
 type FloatValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The float value.
@@ -160,6 +173,9 @@ func (x *FloatValue) GetValue() float32 {
 // Wrapper message for `int64`.
 //
 // The JSON representation for `Int64Value` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs, and
+// there are no plans to remove it.
 type Int64Value struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The int64 value.
@@ -213,6 +229,9 @@ func (x *Int64Value) GetValue() int64 {
 // Wrapper message for `uint64`.
 //
 // The JSON representation for `UInt64Value` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs, and
+// there are no plans to remove it.
 type UInt64Value struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The uint64 value.
@@ -266,6 +285,9 @@ func (x *UInt64Value) GetValue() uint64 {
 // Wrapper message for `int32`.
 //
 // The JSON representation for `Int32Value` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs, and
+// there are no plans to remove it.
 type Int32Value struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The int32 value.
@@ -319,6 +341,9 @@ func (x *Int32Value) GetValue() int32 {
 // Wrapper message for `uint32`.
 //
 // The JSON representation for `UInt32Value` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs, and
+// there are no plans to remove it.
 type UInt32Value struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The uint32 value.
@@ -372,6 +397,9 @@ func (x *UInt32Value) GetValue() uint32 {
 // Wrapper message for `bool`.
 //
 // The JSON representation for `BoolValue` is JSON `true` and `false`.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs, and
+// there are no plans to remove it.
 type BoolValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The bool value.
@@ -425,6 +453,9 @@ func (x *BoolValue) GetValue() bool {
 // Wrapper message for `string`.
 //
 // The JSON representation for `StringValue` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs, and
+// there are no plans to remove it.
 type StringValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The string value.
@@ -478,6 +509,9 @@ func (x *StringValue) GetValue() string {
 // Wrapper message for `bytes`.
 //
 // The JSON representation for `BytesValue` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs, and
+// there are no plans to remove it.
 type BytesValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The bytes value.
@@ -530,41 +564,32 @@ func (x *BytesValue) GetValue() []byte {
 
 var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{
-	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
-	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
-	0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
-	0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
-	0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
-	0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
-	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
-	0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
-	0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
-	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
-	0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
-	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
-	0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
-	0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
-	0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
-	0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
-	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_wrappers_proto_rawDesc = "" +
+	"\n" +
+	"\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"#\n" +
+	"\vDoubleValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x01R\x05value\"\"\n" +
+	"\n" +
+	"FloatValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x02R\x05value\"\"\n" +
+	"\n" +
+	"Int64Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x03R\x05value\"#\n" +
+	"\vUInt64Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x04R\x05value\"\"\n" +
+	"\n" +
+	"Int32Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x05R\x05value\"#\n" +
+	"\vUInt32Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\rR\x05value\"!\n" +
+	"\tBoolValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\bR\x05value\"#\n" +
+	"\vStringValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\tR\x05value\"\"\n" +
+	"\n" +
+	"BytesValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\fR\x05valueB\x83\x01\n" +
+	"\x13com.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go
index 91e171271a..4805d09ab5 100644
--- a/vendor/k8s.io/client-go/util/cert/cert.go
+++ b/vendor/k8s.io/client-go/util/cert/cert.go
@@ -75,13 +75,15 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro
 			CommonName:   cfg.CommonName,
 			Organization: cfg.Organization,
 		},
-		DNSNames:              []string{cfg.CommonName},
 		NotBefore:             notBefore,
 		NotAfter:              now.Add(duration365d * 10).UTC(),
 		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
 		BasicConstraintsValid: true,
 		IsCA:                  true,
 	}
+	if len(cfg.CommonName) > 0 {
+		tmpl.DNSNames = []string{cfg.CommonName}
+	}
 
 	certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
 	if err != nil {
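The guard changes what the self-signed CA template contains when no CommonName is configured: previously it always carried `DNSNames: []string{cfg.CommonName}`, so an empty CommonName yielded a certificate with an empty DNS SAN entry. A sketch of the new behavior, using a stand-in struct rather than the real cert.Config:

```go
package main

import "fmt"

// config mirrors only the relevant field of cert.Config, for illustration.
type config struct{ CommonName string }

func main() {
	cfg := config{CommonName: ""}
	var dnsNames []string
	// Mirrors the guarded assignment added in the hunk above.
	if len(cfg.CommonName) > 0 {
		dnsNames = []string{cfg.CommonName}
	}
	// The slice stays nil, so no empty SAN is written into the template.
	fmt.Println(dnsNames == nil) // true
}
```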
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b74232fc73..25992bac5f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -11,7 +11,7 @@ github.com/MakeNowJust/heredoc
 # github.com/Masterminds/goutils v1.1.1
 ## explicit
 github.com/Masterminds/goutils
-# github.com/Masterminds/semver/v3 v3.3.0
+# github.com/Masterminds/semver/v3 v3.4.0
 ## explicit; go 1.21
 github.com/Masterminds/semver/v3
 # github.com/Masterminds/sprig/v3 v3.3.0
@@ -70,8 +70,8 @@ github.com/cenkalti/backoff/v4
 # github.com/cespare/xxhash/v2 v2.3.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
-# github.com/cloudflare/circl v1.3.7
-## explicit; go 1.19
+# github.com/cloudflare/circl v1.6.1
+## explicit; go 1.22.0
 github.com/cloudflare/circl/dh/x25519
 github.com/cloudflare/circl/dh/x448
 github.com/cloudflare/circl/ecc/goldilocks
@@ -90,12 +90,13 @@ github.com/davecgh/go-spew/spew
 # github.com/distribution/reference v0.6.0
 ## explicit; go 1.20
 github.com/distribution/reference
-# github.com/docker/docker v27.3.1+incompatible
+# github.com/docker/docker v28.0.2+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
 github.com/docker/docker/api/types/blkiodev
 github.com/docker/docker/api/types/checkpoint
+github.com/docker/docker/api/types/common
 github.com/docker/docker/api/types/container
 github.com/docker/docker/api/types/events
 github.com/docker/docker/api/types/filters
@@ -103,6 +104,7 @@ github.com/docker/docker/api/types/image
 github.com/docker/docker/api/types/mount
 github.com/docker/docker/api/types/network
 github.com/docker/docker/api/types/registry
+github.com/docker/docker/api/types/storage
 github.com/docker/docker/api/types/strslice
 github.com/docker/docker/api/types/swarm
 github.com/docker/docker/api/types/swarm/runtime
@@ -112,6 +114,7 @@ github.com/docker/docker/api/types/versions
 github.com/docker/docker/api/types/volume
 github.com/docker/docker/client
 github.com/docker/docker/errdefs
+github.com/docker/docker/internal/lazyregexp
 github.com/docker/docker/internal/multierror
 github.com/docker/docker/pkg/stdcopy
 # github.com/docker/go-connections v0.5.0
@@ -247,7 +250,7 @@ github.com/google/safetext/yamltemplate
 # github.com/google/uuid v1.6.0
 ## explicit
 github.com/google/uuid
-# github.com/gophercloud/gophercloud/v2 v2.7.0
+# github.com/gophercloud/gophercloud/v2 v2.9.0
 ## explicit; go 1.22
 github.com/gophercloud/gophercloud/v2
 github.com/gophercloud/gophercloud/v2/openstack
@@ -303,7 +306,7 @@ github.com/gophercloud/utils/v2/openstack/compute/v2/flavors
 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
 github.com/grpc-ecosystem/grpc-gateway/v2/runtime
 github.com/grpc-ecosystem/grpc-gateway/v2/utilities
-# github.com/hashicorp/go-version v1.7.0
+# github.com/hashicorp/go-version v1.8.0
 ## explicit
 github.com/hashicorp/go-version
 # github.com/hashicorp/hcl v1.0.0
@@ -377,12 +380,13 @@ github.com/modern-go/reflect2
 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 ## explicit
 github.com/munnerz/goautoneg
-# github.com/onsi/ginkgo/v2 v2.23.4
+# github.com/onsi/ginkgo/v2 v2.27.3
 ## explicit; go 1.23.0
 github.com/onsi/ginkgo/v2
 github.com/onsi/ginkgo/v2/config
 github.com/onsi/ginkgo/v2/formatter
 github.com/onsi/ginkgo/v2/ginkgo
+github.com/onsi/ginkgo/v2/ginkgo/automaxprocs
 github.com/onsi/ginkgo/v2/ginkgo/build
 github.com/onsi/ginkgo/v2/ginkgo/command
 github.com/onsi/ginkgo/v2/ginkgo/generators
@@ -396,10 +400,11 @@ github.com/onsi/ginkgo/v2/internal
 github.com/onsi/ginkgo/v2/internal/global
 github.com/onsi/ginkgo/v2/internal/interrupt_handler
 github.com/onsi/ginkgo/v2/internal/parallel_support
+github.com/onsi/ginkgo/v2/internal/reporters
 github.com/onsi/ginkgo/v2/internal/testingtproxy
 github.com/onsi/ginkgo/v2/reporters
 github.com/onsi/ginkgo/v2/types
-# github.com/onsi/gomega v1.37.0
+# github.com/onsi/gomega v1.38.2
 ## explicit; go 1.23.0
 github.com/onsi/gomega
 github.com/onsi/gomega/format
@@ -436,8 +441,8 @@ github.com/pelletier/go-toml/v2/unstable
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
-# github.com/prometheus/client_golang v1.22.0
-## explicit; go 1.22
+# github.com/prometheus/client_golang v1.23.2
+## explicit; go 1.23.0
 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
 github.com/prometheus/client_golang/prometheus
@@ -448,15 +453,15 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal
 github.com/prometheus/client_golang/prometheus/testutil
 github.com/prometheus/client_golang/prometheus/testutil/promlint
 github.com/prometheus/client_golang/prometheus/testutil/promlint/validations
-# github.com/prometheus/client_model v0.6.1
-## explicit; go 1.19
+# github.com/prometheus/client_model v0.6.2
+## explicit; go 1.22.0
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.62.0
-## explicit; go 1.21
+# github.com/prometheus/common v0.66.1
+## explicit; go 1.23.0
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.15.1
-## explicit; go 1.20
+# github.com/prometheus/procfs v0.16.1
+## explicit; go 1.23.0
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
@@ -486,7 +491,7 @@ github.com/spf13/cast
 # github.com/spf13/cobra v1.8.1
 ## explicit; go 1.15
 github.com/spf13/cobra
-# github.com/spf13/pflag v1.0.6
+# github.com/spf13/pflag v1.0.10
 ## explicit; go 1.12
 github.com/spf13/pflag
 # github.com/spf13/viper v1.19.0
@@ -568,14 +573,8 @@ go.opentelemetry.io/proto/otlp/collector/trace/v1
 go.opentelemetry.io/proto/otlp/common/v1
 go.opentelemetry.io/proto/otlp/resource/v1
 go.opentelemetry.io/proto/otlp/trace/v1
-# go.uber.org/automaxprocs v1.6.0
-## explicit; go 1.20
-go.uber.org/automaxprocs
-go.uber.org/automaxprocs/internal/cgroups
-go.uber.org/automaxprocs/internal/runtime
-go.uber.org/automaxprocs/maxprocs
-# go.uber.org/mock v0.5.2
-## explicit; go 1.23
+# go.uber.org/mock v0.6.0
+## explicit; go 1.23.0
 go.uber.org/mock/gomock
 go.uber.org/mock/mockgen/model
 # go.uber.org/multierr v1.11.0
@@ -592,12 +591,20 @@ go.uber.org/zap/internal/exit
 go.uber.org/zap/internal/pool
 go.uber.org/zap/internal/stacktrace
 go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.39.0
+# go.yaml.in/yaml/v2 v2.4.2
+## explicit; go 1.15
+go.yaml.in/yaml/v2
+# go.yaml.in/yaml/v3 v3.0.4
+## explicit; go 1.16
+go.yaml.in/yaml/v3
+# golang.org/x/crypto v0.41.0
 ## explicit; go 1.23.0
 golang.org/x/crypto/bcrypt
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/cast5
 golang.org/x/crypto/chacha20
+golang.org/x/crypto/cryptobyte
+golang.org/x/crypto/cryptobyte/asn1
 golang.org/x/crypto/curve25519
 golang.org/x/crypto/hkdf
 golang.org/x/crypto/internal/alias
@@ -615,12 +622,12 @@ golang.org/x/exp/slices
 golang.org/x/exp/slog
 golang.org/x/exp/slog/internal
 golang.org/x/exp/slog/internal/buffer
-# golang.org/x/mod v0.25.0
+# golang.org/x/mod v0.27.0
 ## explicit; go 1.23.0
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.40.0
+# golang.org/x/net v0.43.0
 ## explicit; go 1.23.0
 golang.org/x/net/html
 golang.org/x/net/html/atom
@@ -633,25 +640,25 @@ golang.org/x/net/internal/httpcommon
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
 golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.24.0
-## explicit; go 1.18
+# golang.org/x/oauth2 v0.30.0
+## explicit; go 1.23.0
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.15.0
+# golang.org/x/sync v0.16.0
 ## explicit; go 1.23.0
 golang.org/x/sync/errgroup
 golang.org/x/sync/singleflight
-# golang.org/x/sys v0.33.0
+# golang.org/x/sys v0.35.0
 ## explicit; go 1.23.0
 golang.org/x/sys/cpu
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# golang.org/x/term v0.32.0
+# golang.org/x/term v0.34.0
 ## explicit; go 1.23.0
 golang.org/x/term
-# golang.org/x/text v0.26.0
+# golang.org/x/text v0.28.0
 ## explicit; go 1.23.0
 golang.org/x/text/cases
 golang.org/x/text/encoding
@@ -686,10 +693,11 @@ golang.org/x/text/width
 # golang.org/x/time v0.5.0
 ## explicit; go 1.18
 golang.org/x/time/rate
-# golang.org/x/tools v0.33.0
+# golang.org/x/tools v0.36.0
 ## explicit; go 1.23.0
 golang.org/x/tools/cover
 golang.org/x/tools/go/ast/astutil
+golang.org/x/tools/go/ast/edge
 golang.org/x/tools/go/ast/inspector
 golang.org/x/tools/go/gcexportdata
 golang.org/x/tools/go/packages
@@ -697,7 +705,6 @@ golang.org/x/tools/go/types/objectpath
 golang.org/x/tools/go/types/typeutil
 golang.org/x/tools/imports
 golang.org/x/tools/internal/aliases
-golang.org/x/tools/internal/astutil/edge
 golang.org/x/tools/internal/event
 golang.org/x/tools/internal/event/core
 golang.org/x/tools/internal/event/keys
@@ -713,6 +720,8 @@ golang.org/x/tools/internal/stdlib
 golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
 golang.org/x/tools/internal/versions
+# golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated
+## explicit; go 1.23.0
 # gomodules.xyz/jsonpatch/v2 v2.4.0
 ## explicit; go 1.20
 gomodules.xyz/jsonpatch/v2
@@ -779,8 +788,8 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.36.5
-## explicit; go 1.21
+# google.golang.org/protobuf v1.36.8
+## explicit; go 1.23
 google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
@@ -839,7 +848,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.31.9
+# k8s.io/api v0.31.14
 ## explicit; go 1.22.0
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -898,7 +907,7 @@ k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
 k8s.io/api/storagemigration/v1alpha1
-# k8s.io/apiextensions-apiserver v0.31.9
+# k8s.io/apiextensions-apiserver v0.31.14
 ## explicit; go 1.22.0
 k8s.io/apiextensions-apiserver/pkg/apihelpers
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
@@ -919,7 +928,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1
 k8s.io/apiextensions-apiserver/pkg/features
-# k8s.io/apimachinery v0.31.9
+# k8s.io/apimachinery v0.31.14
 ## explicit; go 1.22.0
 k8s.io/apimachinery/pkg/api/apitesting
 k8s.io/apimachinery/pkg/api/apitesting/fuzzer
@@ -984,7 +993,7 @@ k8s.io/apimachinery/pkg/version
 k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.31.9
+# k8s.io/apiserver v0.31.14
 ## explicit; go 1.22.0
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/apis/apiserver
@@ -1048,7 +1057,7 @@ k8s.io/apiserver/pkg/warning
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics
-# k8s.io/client-go v0.31.9
+# k8s.io/client-go v0.31.14
 ## explicit; go 1.22.0
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1
@@ -1321,7 +1330,7 @@ k8s.io/client-go/util/workqueue
 ## explicit; go 1.22.0
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/token/util
-# k8s.io/code-generator v0.31.9
+# k8s.io/code-generator v0.31.14
 ## explicit; go 1.22.0
 k8s.io/code-generator
 k8s.io/code-generator/cmd/applyconfiguration-gen
@@ -1357,7 +1366,7 @@ k8s.io/code-generator/cmd/register-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
 k8s.io/code-generator/third_party/forked/golang/reflect
-# k8s.io/component-base v0.31.9
+# k8s.io/component-base v0.31.14
 ## explicit; go 1.22.0
 k8s.io/component-base/cli/flag
 k8s.io/component-base/featuregate
@@ -1440,7 +1449,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
-# sigs.k8s.io/cluster-api v1.9.8
+# sigs.k8s.io/cluster-api v1.9.11
 ## explicit; go 1.22.0
 sigs.k8s.io/cluster-api/api/v1beta1
 sigs.k8s.io/cluster-api/api/v1beta1/index
@@ -1525,7 +1534,7 @@ sigs.k8s.io/cluster-api/util/topology
 sigs.k8s.io/cluster-api/util/version
 sigs.k8s.io/cluster-api/util/yaml
 sigs.k8s.io/cluster-api/version
-# sigs.k8s.io/cluster-api/test v1.9.8
+# sigs.k8s.io/cluster-api/test v1.9.11
 ## explicit; go 1.22.0
 sigs.k8s.io/cluster-api/test/e2e
 sigs.k8s.io/cluster-api/test/e2e/internal/log
@@ -1650,7 +1659,7 @@ sigs.k8s.io/structured-merge-diff/v4/merge
 sigs.k8s.io/structured-merge-diff/v4/schema
 sigs.k8s.io/structured-merge-diff/v4/typed
 sigs.k8s.io/structured-merge-diff/v4/value
-# sigs.k8s.io/yaml v1.4.0
-## explicit; go 1.12
+# sigs.k8s.io/yaml v1.6.0
+## explicit; go 1.22
 sigs.k8s.io/yaml
 sigs.k8s.io/yaml/goyaml.v2
diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/common.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/common.go
index dfd1d42f5b..1ade969738 100644
--- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/common.go
+++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/common.go
@@ -62,7 +62,7 @@ func (c *clusterctlClient) getComponentsByName(ctx context.Context, provider str
 
 // parseProviderName defines a utility function that parses the abbreviated syntax for name[:version].
 func parseProviderName(provider string) (name string, version string, err error) {
-	t := strings.Split(strings.ToLower(provider), ":")
+	t := strings.Split(provider, ":")
 	if len(t) > 2 {
 		return "", "", errors.Errorf("invalid provider name %q. Provider name should be in the form name[:version]", provider)
 	}
@@ -71,7 +71,7 @@ func parseProviderName(provider string) (name string, version string, err error)
 		return "", "", errors.Errorf("invalid provider name %q. Provider name should be in the form name[:version] and name cannot be empty", provider)
 	}
 
-	name = t[0]
+	name = strings.ToLower(t[0])
 	if err := validateDNS1123Label(name); err != nil {
 		return "", "", errors.Wrapf(err, "invalid provider name %q. Provider name should be in the form name[:version] and the name should be valid", provider)
 	}
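Moving `strings.ToLower` from the whole input to just the name component means the version half of `name[:version]` keeps its original case, which matters for mixed-case pre-release tags. A simplified sketch of the revised parsing (validation and error handling omitted):

```go
package main

import (
	"fmt"
	"strings"
)

// parse sketches the new behavior: only the name is normalized.
func parse(provider string) (name, version string) {
	t := strings.Split(provider, ":")
	name = strings.ToLower(t[0])
	if len(t) == 2 {
		version = t[1]
	}
	return name, version
}

func main() {
	n, v := parse("AWS:v2.0.0-RC1")
	// The old behavior lowercased everything: "aws", "v2.0.0-rc1".
	fmt.Println(n, v) // aws v2.0.0-RC1
}
```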
diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/config/providers_client.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/config/providers_client.go
index c265c49ef7..159db0b33c 100644
--- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/config/providers_client.go
+++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/config/providers_client.go
@@ -538,6 +538,10 @@ func validateProvider(r Provider) error {
 		return errors.New("name value cannot be empty")
 	}
 
+	if r.Name() != strings.ToLower(r.Name()) {
+		return errors.Errorf("provider name %s must be in lower case", r.Name())
+	}
+
 	if (r.Name() == ClusterAPIProviderName) != (r.Type() == clusterctlv1.CoreProviderType) {
 		return errors.Errorf("name %s must be used with the %s type (name: %s, type: %s)", ClusterAPIProviderName, clusterctlv1.CoreProviderType, r.Name(), r.Type())
 	}
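The complementary check above enforces lower-case names at provider registration time instead of silently accepting mixed case. A one-line sketch of the comparison it performs:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "AWS" // hypothetical provider name supplied at registration
	if name != strings.ToLower(name) {
		fmt.Printf("provider name %s must be in lower case\n", name)
	}
}
```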
diff --git a/vendor/sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/current_state.go b/vendor/sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/current_state.go
index ef0725e92b..fdb1f4e804 100644
--- a/vendor/sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/current_state.go
+++ b/vendor/sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/current_state.go
@@ -19,6 +19,7 @@ package cluster
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
@@ -84,7 +85,7 @@ func (r *Reconciler) getCurrentState(ctx context.Context, s *scope.Scope) (*scop
 // getCurrentInfrastructureClusterState looks for the state of the InfrastructureCluster. If a reference is set but not
 // found, either from an error or the object not being found, an error is thrown.
 func (r *Reconciler) getCurrentInfrastructureClusterState(ctx context.Context, blueprintInfrastructureClusterTemplate *unstructured.Unstructured, cluster *clusterv1.Cluster) (*unstructured.Unstructured, error) {
-	ref, err := alignRefAPIVersion(blueprintInfrastructureClusterTemplate, cluster.Spec.InfrastructureRef)
+	ref, err := alignRefAPIVersion(blueprintInfrastructureClusterTemplate, cluster.Spec.InfrastructureRef, false)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to read %s %s", cluster.Spec.InfrastructureRef.Kind, klog.KRef(cluster.Spec.InfrastructureRef.Namespace, cluster.Spec.InfrastructureRef.Name))
 	}
@@ -109,7 +110,7 @@ func (r *Reconciler) getCurrentControlPlaneState(ctx context.Context, blueprintC
 	res := &scope.ControlPlaneState{}
 
 	// Get the control plane object.
-	ref, err := alignRefAPIVersion(blueprintControlPlane.Template, cluster.Spec.ControlPlaneRef)
+	ref, err := alignRefAPIVersion(blueprintControlPlane.Template, cluster.Spec.ControlPlaneRef, false)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to read %s %s", cluster.Spec.ControlPlaneRef.Kind, klog.KRef(cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name))
 	}
@@ -134,7 +135,7 @@ func (r *Reconciler) getCurrentControlPlaneState(ctx context.Context, blueprintC
 	if err != nil {
 		return res, errors.Wrapf(err, "failed to get InfrastructureMachineTemplate reference for %s %s", res.Object.GetKind(), klog.KObj(res.Object))
 	}
-	ref, err = alignRefAPIVersion(blueprintControlPlane.InfrastructureMachineTemplate, machineInfrastructureRef)
+	ref, err = alignRefAPIVersion(blueprintControlPlane.InfrastructureMachineTemplate, machineInfrastructureRef, true)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to get InfrastructureMachineTemplate for %s %s", res.Object.GetKind(), klog.KObj(res.Object))
 	}
@@ -229,11 +230,11 @@ func (r *Reconciler) getCurrentMachineDeploymentState(ctx context.Context, bluep
 				if !ok {
 					return nil, fmt.Errorf("failed to find MachineDeployment class %s in ClusterClass", mdClassName)
 				}
-				bootstrapRef, err = alignRefAPIVersion(mdBluePrint.BootstrapTemplate, bootstrapRef)
+				bootstrapRef, err = alignRefAPIVersion(mdBluePrint.BootstrapTemplate, bootstrapRef, true)
 				if err != nil {
 					return nil, errors.Wrap(err, fmt.Sprintf("MachineDeployment %s Bootstrap reference could not be retrieved", klog.KObj(m)))
 				}
-				infraRef, err = alignRefAPIVersion(mdBluePrint.InfrastructureMachineTemplate, infraRef)
+				infraRef, err = alignRefAPIVersion(mdBluePrint.InfrastructureMachineTemplate, infraRef, true)
 				if err != nil {
 					return nil, errors.Wrap(err, fmt.Sprintf("MachineDeployment %s Infrastructure reference could not be retrieved", klog.KObj(m)))
 				}
@@ -349,11 +350,11 @@ func (r *Reconciler) getCurrentMachinePoolState(ctx context.Context, blueprintMa
 			if !ok {
 				return nil, fmt.Errorf("failed to find MachinePool class %s in ClusterClass", mpClassName)
 			}
-			bootstrapRef, err = alignRefAPIVersion(mpBluePrint.BootstrapTemplate, bootstrapRef)
+			bootstrapRef, err = alignRefAPIVersion(mpBluePrint.BootstrapTemplate, bootstrapRef, false)
 			if err != nil {
 				return nil, errors.Wrap(err, fmt.Sprintf("MachinePool %s Bootstrap reference could not be retrieved", klog.KObj(m)))
 			}
-			infraRef, err = alignRefAPIVersion(mpBluePrint.InfrastructureMachinePoolTemplate, infraRef)
+			infraRef, err = alignRefAPIVersion(mpBluePrint.InfrastructureMachinePoolTemplate, infraRef, false)
 			if err != nil {
 				return nil, errors.Wrap(err, fmt.Sprintf("MachinePool %s Infrastructure reference could not be retrieved", klog.KObj(m)))
 			}
@@ -398,7 +399,7 @@ func (r *Reconciler) getCurrentMachinePoolState(ctx context.Context, blueprintMa
 // If group or kind was changed in the ClusterClass, an exact copy of the currentRef is returned because
 // it will end up in a diff and a rollout anyway.
 // Only bootstrap template refs in a ClusterClass can change their group and kind.
-func alignRefAPIVersion(templateFromClusterClass *unstructured.Unstructured, currentRef *corev1.ObjectReference) (*corev1.ObjectReference, error) {
+func alignRefAPIVersion(templateFromClusterClass *unstructured.Unstructured, currentRef *corev1.ObjectReference, isCurrentTemplate bool) (*corev1.ObjectReference, error) {
 	currentGV, err := schema.ParseGroupVersion(currentRef.APIVersion)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to parse apiVersion: %q", currentRef.APIVersion)
@@ -406,9 +407,19 @@ func alignRefAPIVersion(templateFromClusterClass *unstructured.Unstructured, cur
 
 	apiVersion := currentRef.APIVersion
 	// Use apiVersion from ClusterClass if group and kind is the same.
-	if templateFromClusterClass.GroupVersionKind().Group == currentGV.Group &&
-		templateFromClusterClass.GetKind() == currentRef.Kind {
-		apiVersion = templateFromClusterClass.GetAPIVersion()
+	if templateFromClusterClass.GroupVersionKind().Group == currentGV.Group {
+		if isCurrentTemplate {
+			// If the current object is a template, kind has to be identical.
+			if templateFromClusterClass.GetKind() == currentRef.Kind {
+				apiVersion = templateFromClusterClass.GetAPIVersion()
+			}
+		} else {
+			// If the current object is not a template, currentRef.Kind should be the kind from CC without the Template suffix,
+			// e.g. KubeadmControlPlaneTemplate == KubeadmControlPlane
+			if strings.TrimSuffix(templateFromClusterClass.GetKind(), "Template") == currentRef.Kind {
+				apiVersion = templateFromClusterClass.GetAPIVersion()
+			}
+		}
 	}
 
 	return &corev1.ObjectReference{
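The new `isCurrentTemplate` flag exists because the blueprint object is always a template, while the current reference is sometimes the instantiated (non-template) object; in that case the kinds match only after stripping the `Template` suffix. A sketch of the non-template comparison:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Blueprint kind from the ClusterClass vs. the instantiated object's kind.
	blueprintKind := "KubeadmControlPlaneTemplate"
	currentKind := "KubeadmControlPlane"
	match := strings.TrimSuffix(blueprintKind, "Template") == currentKind
	fmt.Println(match) // true: apiVersion may be taken from the ClusterClass
}
```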
diff --git a/vendor/sigs.k8s.io/cluster-api/test/e2e/self_hosted.go b/vendor/sigs.k8s.io/cluster-api/test/e2e/self_hosted.go
index b7aae9b51f..4ada7d1eea 100644
--- a/vendor/sigs.k8s.io/cluster-api/test/e2e/self_hosted.go
+++ b/vendor/sigs.k8s.io/cluster-api/test/e2e/self_hosted.go
@@ -245,6 +245,14 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
 			return selfHostedClusterProxy.GetClient().Get(ctx, client.ObjectKey{Name: "kube-system"}, kubeSystem)
 		}, "5s", "100ms").Should(BeNil(), "Failed to assert self-hosted API server stability")
 
+		By("Ensure all machines have NodeRef before doing move")
+		// Ensure all machines have NodeRef before attempting to move.
+		// This prevents clusterctl move failures when machines are still provisioning.
+		framework.WaitForClusterMachineNodeRefs(ctx, framework.WaitForClusterMachineNodeRefsInput{
+			GetLister: input.BootstrapClusterProxy.GetClient(),
+			Cluster:   cluster,
+		}, input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade")...)
+
 		// Get the machines of the workloadCluster before it is moved to become self-hosted to make sure that the move did not trigger
 		// any unexpected rollouts.
 		preMoveMachineList := &unstructured.UnstructuredList{}
@@ -447,6 +455,14 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
 				return selfHostedClusterProxy.GetClient().Get(ctx, client.ObjectKey{Name: "kube-system"}, kubeSystem)
 			}, "5s", "100ms").Should(BeNil(), "Failed to assert self-hosted API server stability")
 
+			By("Ensure all machines have NodeRef before doing move back")
+			// Ensure all machines have NodeRef before attempting to move back to bootstrap.
+			// This prevents clusterctl move failures when machines are still provisioning.
+			framework.WaitForClusterMachineNodeRefs(ctx, framework.WaitForClusterMachineNodeRefsInput{
+				GetLister: selfHostedClusterProxy.GetClient(),
+				Cluster:   selfHostedCluster,
+			}, input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade")...)
+
 			By("Moving the cluster back to bootstrap")
 			clusterctl.Move(ctx, clusterctl.MoveInput{
 				LogFolder:            filepath.Join(input.ArtifactFolder, "clusters", clusterResources.Cluster.Name),
diff --git a/vendor/sigs.k8s.io/cluster-api/test/infrastructure/container/docker.go b/vendor/sigs.k8s.io/cluster-api/test/infrastructure/container/docker.go
index 893090b88b..bc9d6d92a2 100644
--- a/vendor/sigs.k8s.io/cluster-api/test/infrastructure/container/docker.go
+++ b/vendor/sigs.k8s.io/cluster-api/test/infrastructure/container/docker.go
@@ -363,7 +363,7 @@ func (d *dockerRuntime) ContainerDebugInfo(ctx context.Context, containerName st
 
 // dockerContainerToContainer converts a Docker API container instance to our local
 // generic container type.
-func dockerContainerToContainer(container *types.Container) Container {
+func dockerContainerToContainer(container *dockercontainer.Summary) Container {
 	return Container{
 		Name:   strings.Trim(container.Names[0], "/"),
 		Image:  container.Image,
diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml
deleted file mode 100644
index 54ed8f9cb9..0000000000
--- a/vendor/sigs.k8s.io/yaml/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-arch: arm64
-dist: focal
-go: 1.15.x
-script:
-  - diff -u <(echo -n) <(gofmt -d *.go)
-  - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON)
-  - GO111MODULE=on go vet .
-  - GO111MODULE=on go test -v -race ./...
-  - git diff --exit-code
-install:
-  - GO111MODULE=off go get golang.org/x/lint/golint
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS
deleted file mode 100644
index 73be0a3a9b..0000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS
+++ /dev/null
@@ -1,24 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- dims
-- jpbetz
-- smarterclayton
-- deads2k
-- sttts
-- liggitt
-- natasha41575
-- knverey
-reviewers:
-- dims
-- thockin
-- jpbetz
-- smarterclayton
-- deads2k
-- derekwaynecarr
-- mikedanese
-- liggitt
-- sttts
-- tallclair
-labels:
-- sig/api-machinery
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
index 53f4139dc3..9a8f1e6782 100644
--- a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
+++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
@@ -1,143 +1,71 @@
-# go-yaml fork
+# goyaml.v2
 
-This package is a fork of the go-yaml library and is intended solely for consumption
-by kubernetes projects. In this fork, we plan to support only critical changes required for
-kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests
-should be made in the upstream go-yaml library, and we will reject such changes in this fork
-unless we are pulling them from upstream.
+This package provides type and function aliases for the `go.yaml.in/yaml/v2` package (which is compatible with `gopkg.in/yaml.v2`).
 
-This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0
+## Purpose
 
-# YAML support for the Go language
-
-Introduction
-------------
-
-The yaml package enables Go programs to comfortably encode and decode YAML
-values. It was developed within [Canonical](https://www.canonical.com) as
-part of the [juju](https://juju.ubuntu.com) project, and is based on a
-pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
-C library to parse and generate YAML data quickly and reliably.
-
-Compatibility
--------------
-
-The yaml package supports most of YAML 1.1 and 1.2, including support for
-anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
-implemented, and base-60 floats from YAML 1.1 are purposefully not
-supported since they're a poor design and are gone in YAML 1.2.
-
-Installation and usage
-----------------------
-
-The import path for the package is *gopkg.in/yaml.v2*.
-
-To install it, run:
-
-    go get gopkg.in/yaml.v2
-
-API documentation
------------------
-
-If opened in a browser, the import path itself leads to the API documentation:
-
-  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
-
-API stability
--------------
-
-The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
-
-
-License
--------
-
-The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
-
-
-Example
--------
-
-```Go
-package main
-
-import (
-        "fmt"
-        "log"
-
-        "gopkg.in/yaml.v2"
-)
-
-var data = `
-a: Easy!
-b:
-  c: 2
-  d: [3, 4]
-`
-
-// Note: struct fields must be public in order for unmarshal to
-// correctly populate the data.
-type T struct {
-        A string
-        B struct {
-                RenamedC int   `yaml:"c"`
-                D        []int `yaml:",flow"`
-        }
-}
-
-func main() {
-        t := T{}
-    
-        err := yaml.Unmarshal([]byte(data), &t)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- t:\n%v\n\n", t)
-    
-        d, err := yaml.Marshal(&t)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- t dump:\n%s\n\n", string(d))
-    
-        m := make(map[interface{}]interface{})
-    
-        err = yaml.Unmarshal([]byte(data), &m)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- m:\n%v\n\n", m)
-    
-        d, err = yaml.Marshal(&m)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- m dump:\n%s\n\n", string(d))
-}
-```
+The purpose of this package is to:
 
-This example will generate the following output:
+1. Provide a transition path for users migrating from the sigs.k8s.io/yaml package to direct usage of go.yaml.in/yaml/v2
+2. Maintain compatibility with existing code while encouraging migration to the upstream package
+3. Reduce maintenance overhead by delegating to the upstream implementation
 
+## Usage
+
+Rather than importing this package, you should migrate to using `go.yaml.in/yaml/v2` directly:
+
+```go
+// Old way
+import "sigs.k8s.io/yaml/goyaml.v2"
+
+// Recommended way
+import "go.yaml.in/yaml/v2"
 ```
---- t:
-{Easy! {2 [3 4]}}
-
---- t dump:
-a: Easy!
-b:
-  c: 2
-  d: [3, 4]
-
-
---- m:
-map[a:Easy! b:map[c:2 d:[3 4]]]
-
---- m dump:
-a: Easy!
-b:
-  c: 2
-  d:
-  - 3
-  - 4
-```
 
+## Available Types and Functions
+
+All public types and functions from `go.yaml.in/yaml/v2` are available through this package:
+
+### Types
+
+- `MapSlice` - Encodes and decodes as a YAML map with preserved key order
+- `MapItem` - An item in a MapSlice
+- `Unmarshaler` - Interface for custom unmarshaling behavior
+- `Marshaler` - Interface for custom marshaling behavior
+- `IsZeroer` - Interface to check if an object is zero
+- `Decoder` - Reads and decodes YAML values from an input stream
+- `Encoder` - Writes YAML values to an output stream
+- `TypeError` - Error returned by Unmarshal for decoding issues
+
+### Functions
+
+- `Unmarshal` - Decodes YAML data into a Go value
+- `UnmarshalStrict` - Like Unmarshal but errors on unknown fields
+- `Marshal` - Serializes a Go value into YAML
+- `NewDecoder` - Creates a new Decoder
+- `NewEncoder` - Creates a new Encoder
+- `FutureLineWrap` - Controls line wrapping behavior
+
+## Migration Guide
+
+To migrate from this package to `go.yaml.in/yaml/v2`:
+
+1. Update your import statements:
+   ```go
+   // From
+   import "sigs.k8s.io/yaml/goyaml.v2"
+   
+   // To
+   import "go.yaml.in/yaml/v2"
+   ```
+
+2. No code changes should be necessary, as the API is identical
+
+3. Update your go.mod file to include the dependency:
+   ```
+   require go.yaml.in/yaml/v2 v2.4.2
+   ```
+
+## Deprecation Notice
+
+All types and functions in this package are marked as deprecated. You should migrate to using `go.yaml.in/yaml/v2` directly.
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go
new file mode 100644
index 0000000000..8c82bc2cb9
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import (
+	gopkg_yaml "go.yaml.in/yaml/v2"
+)
+
+// Type aliases for public types from go.yaml.in/yaml/v2
+type (
+	// MapSlice encodes and decodes as a YAML map.
+	// The order of keys is preserved when encoding and decoding.
+	// Deprecated: Use go.yaml.in/yaml/v2.MapSlice directly.
+	MapSlice = gopkg_yaml.MapSlice
+
+	// MapItem is an item in a MapSlice.
+	// Deprecated: Use go.yaml.in/yaml/v2.MapItem directly.
+	MapItem = gopkg_yaml.MapItem
+
+	// Unmarshaler is implemented by types to customize their behavior when being unmarshaled from a YAML document.
+	// Deprecated: Use go.yaml.in/yaml/v2.Unmarshaler directly.
+	Unmarshaler = gopkg_yaml.Unmarshaler
+
+	// Marshaler is implemented by types to customize their behavior when being marshaled into a YAML document.
+	// Deprecated: Use go.yaml.in/yaml/v2.Marshaler directly.
+	Marshaler = gopkg_yaml.Marshaler
+
+	// IsZeroer is used to check whether an object is zero to determine whether it should be omitted when
+	// marshaling with the omitempty flag. One notable implementation is time.Time.
+	// Deprecated: Use go.yaml.in/yaml/v2.IsZeroer directly.
+	IsZeroer = gopkg_yaml.IsZeroer
+
+	// Decoder reads and decodes YAML values from an input stream.
+	// Deprecated: Use go.yaml.in/yaml/v2.Decoder directly.
+	Decoder = gopkg_yaml.Decoder
+
+	// Encoder writes YAML values to an output stream.
+	// Deprecated: Use go.yaml.in/yaml/v2.Encoder directly.
+	Encoder = gopkg_yaml.Encoder
+
+	// TypeError is returned by Unmarshal when one or more fields in the YAML document cannot be properly decoded.
+	// Deprecated: Use go.yaml.in/yaml/v2.TypeError directly.
+	TypeError = gopkg_yaml.TypeError
+)
+
+// Function aliases for public functions from go.yaml.in/yaml/v2
+var (
+	// Unmarshal decodes the first document found within the in byte slice and assigns decoded values into the out value.
+	// Deprecated: Use go.yaml.in/yaml/v2.Unmarshal directly.
+	Unmarshal = gopkg_yaml.Unmarshal
+
+	// UnmarshalStrict is like Unmarshal except that any fields that are found in the data that do not have corresponding struct members will result in an error.
+	// Deprecated: Use go.yaml.in/yaml/v2.UnmarshalStrict directly.
+	UnmarshalStrict = gopkg_yaml.UnmarshalStrict
+
+	// Marshal serializes the value provided into a YAML document.
+	// Deprecated: Use go.yaml.in/yaml/v2.Marshal directly.
+	Marshal = gopkg_yaml.Marshal
+
+	// NewDecoder returns a new decoder that reads from r.
+	// Deprecated: Use go.yaml.in/yaml/v2.NewDecoder directly.
+	NewDecoder = gopkg_yaml.NewDecoder
+
+	// NewEncoder returns a new encoder that writes to w.
+	// Deprecated: Use go.yaml.in/yaml/v2.NewEncoder directly.
+	NewEncoder = gopkg_yaml.NewEncoder
+
+	// FutureLineWrap globally disables line wrapping when encoding long strings.
+	// Deprecated: Use go.yaml.in/yaml/v2.FutureLineWrap directly.
+	FutureLineWrap = gopkg_yaml.FutureLineWrap
+)
diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go
index fc10246bdb..aa01acd45d 100644
--- a/vendor/sigs.k8s.io/yaml/yaml.go
+++ b/vendor/sigs.k8s.io/yaml/yaml.go
@@ -24,7 +24,7 @@ import (
 	"reflect"
 	"strconv"
 
-	"sigs.k8s.io/yaml/goyaml.v2"
+	"go.yaml.in/yaml/v2"
 )
 
 // Marshal marshals obj into JSON using stdlib json.Marshal, and then converts JSON to YAML using JSONToYAML (see that method for more reference)
@@ -92,7 +92,7 @@ func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error {
 		d = opt(d)
 	}
 	if err := d.Decode(&obj); err != nil {
-		return fmt.Errorf("while decoding JSON: %v", err)
+		return fmt.Errorf("while decoding JSON: %w", err)
 	}
 	return nil
 }
@@ -417,3 +417,10 @@ func jsonToYAMLValue(j interface{}) interface{} {
 	}
 	return j
 }
+
+// DisallowUnknownFields configures the JSON decoder to error out if unknown
+// fields come along, instead of dropping them by default.
+func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
+	d.DisallowUnknownFields()
+	return d
+}
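With Go 1.10 long below the module's minimum Go version, the build-tagged yaml_go110.go file (deleted below) is no longer needed, so `DisallowUnknownFields` moves into yaml.go unconditionally. It remains usable as a `JSONOpt`; a small sketch:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

type config struct {
	Name string `json:"name"`
}

func main() {
	data := []byte("name: demo\nextra: oops\n")
	var c config
	// Default behavior: the unknown key "extra" is silently dropped.
	fmt.Println(yaml.Unmarshal(data, &c)) // <nil>
	// With the option, decoding fails on the unknown key.
	err := yaml.Unmarshal(data, &c, yaml.DisallowUnknownFields)
	fmt.Println(err != nil) // true
}
```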
diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go
deleted file mode 100644
index 94abc1719d..0000000000
--- a/vendor/sigs.k8s.io/yaml/yaml_go110.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// This file contains changes that are only compatible with go 1.10 and onwards.
-
-//go:build go1.10
-// +build go1.10
-
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package yaml
-
-import "encoding/json"
-
-// DisallowUnknownFields configures the JSON decoder to error out if unknown
-// fields come along, instead of dropping them by default.
-func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
-	d.DisallowUnknownFields()
-	return d
-}